├── .bumpversion.cfg ├── .gitignore ├── .travis.yml ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── flake8_rst ├── __init__.py ├── __main__.py ├── application.py ├── checker.py ├── cli.py ├── rst.py ├── sourceblock.py └── sphinxext │ ├── __init__.py │ └── custom_roles.py ├── setup.py ├── tests ├── conftest.py ├── data │ ├── example_1.rst │ ├── example_10.py │ ├── example_11.rst │ ├── example_12.rst │ ├── example_13.rst │ ├── example_14.py │ ├── example_2.py │ ├── example_3.py │ ├── example_4.py │ ├── example_5.py │ ├── example_6.py │ ├── example_7.py │ ├── example_8.py │ └── example_9.py ├── result_py2 │ ├── result_1.py │ ├── result_10.py │ ├── result_11.py │ ├── result_12.py │ ├── result_13.py │ ├── result_2.py │ ├── result_3.py │ ├── result_4.py │ ├── result_5.py │ ├── result_6.py │ ├── result_7.py │ ├── result_8.py │ └── result_9.py ├── result_py3 │ ├── result_1.py │ ├── result_10.py │ ├── result_11.py │ ├── result_12.py │ ├── result_13.py │ ├── result_14.py │ ├── result_2.py │ ├── result_3.py │ ├── result_4.py │ ├── result_5.py │ ├── result_6.py │ ├── result_7.py │ ├── result_8.py │ └── result_9.py ├── summary_py2 │ ├── summary_1.txt │ ├── summary_10.txt │ ├── summary_11.txt │ ├── summary_12.txt │ ├── summary_13.txt │ ├── summary_2.txt │ ├── summary_3.txt │ ├── summary_4.txt │ ├── summary_5.txt │ ├── summary_6.txt │ ├── summary_7.txt │ ├── summary_8.txt │ └── summary_9.txt ├── summary_py3 │ ├── summary_1.txt │ ├── summary_10.txt │ ├── summary_11.txt │ ├── summary_12.txt │ ├── summary_13.txt │ ├── summary_14.txt │ ├── summary_2.txt │ ├── summary_3.txt │ ├── summary_4.txt │ ├── summary_5.txt │ ├── summary_6.txt │ ├── summary_7.txt │ ├── summary_8.txt │ └── summary_9.txt ├── test.py ├── test_inject_options.py ├── test_precisely.py └── test_source_block.py └── tox.ini /.bumpversion.cfg: -------------------------------------------------------------------------------- 1 | [bumpversion] 2 | current_version = 0.8.0 3 | commit = True 4 | tag = True 5 | 6 | [bumpversion:file:setup.py] 7 | search = version='{current_version}' 8 | replace = version='{new_version}' 9 | 10 | [bumpversion:file:flake8_rst/__init__.py] 11 | search = __version__ = '{current_version}' 12 | replace = __version__ = '{new_version}' 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .tox 2 | .hypothesis 3 | build 4 | dist 5 | *.egg-info/ 6 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | sudo: false 2 | 3 | cache: 4 | apt: true 5 | directories: 6 | - $HOME/.cache/pip 7 | - $HOME/.ccache 8 | - $HOME/.pip-cache 9 | dist: trusty 10 | language: python 11 | 12 | 13 | matrix: 14 | include: 15 | - python: '2.7' 16 | env: TOXENV="py27-linux" 17 | 18 | - python: '3.6' 19 | env: TOXENV="py36-linux" 20 | 21 | - python: '3.6' 22 | env: TOXENV="check" 23 | 24 | install: pip install tox 25 | script: tox -vv 26 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2018 The Python Packaging Authority 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation 
the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 20 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include MANIFEST.in 2 | include LICENSE 3 | include Makefile 4 | include README.md 5 | 6 | include .bumpversion.cfg 7 | 8 | graft flake8_rst 9 | graft tests 10 | 11 | include tox.ini 12 | exclude .travis.yml 13 | 14 | global-exclude .git* 15 | global-exclude __pycache__/* 16 | global-exclude **/.hypothesis* 17 | global-exclude *.py[co] 18 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | .PHONY: clean 3 | clean: 4 | -rm -rf build dist webim.egg-info htmlcov .eggs 5 | 6 | minor: 7 | bumpversion minor 8 | 9 | major: 10 | bumpversion major 11 | 12 | patch: 13 | bumpversion patch 14 | 15 | publish: 16 | pip install wheel twine 17 | python3 setup.py sdist bdist_wheel 18 | twine upload dist/* 19 | 20 | upload: clean publish 21 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # flake8-rst module 2 | [![PyPI](https://img.shields.io/pypi/v/flake8-rst.svg)](https://pypi.org/project/flake8-rst/) 3 | [![conda-forge](https://anaconda.org/conda-forge/flake8-rst/badges/version.svg)](https://anaconda.org/conda-forge/flake8-rst) 4 | [![Build Status](https://travis-ci.org/kataev/flake8-rst.svg?branch=master)](https://travis-ci.org/kataev/flake8-rst) 5 | 6 | Runs flake8 on code snippets in docstrings or RST files. 7 | 8 | 9 | ## Idea 10 | 11 | The idea was proposed by Mike Bayer in https://github.com/zzzeek/sqlalchemy/pull/362 12 | 13 | > That said, if there was some alternate form of "doctest" that could simply test a code example both for Python syntax, pep8 compliance (which would be AWESOME) as well as symbol consistency, that would be helpful. The tool could be configured with common imports and symbols significant to SQLAlchemy examples and be helpful as a basic sanity check for code examples. As it is, when writing new documentation I have to organize and run the code in a separate .py file to make sure it does the right thing. So this is a problem, just my experience with doctest in writing the tutorials has shown me what it's good at and where it's likely getting in the way. 14 | 15 | The implementation was inspired by https://github.com/asottile/blacken-docs 16 | 17 | 18 | ## Usage 19 | You can install the tool with pip: `pip install flake8-rst`. 
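Once installed, run it the same way as flake8 by passing the files or directories to check, for example (the paths here are illustrative):

```commandline
flake8-rst README.rst docs/*.rst flake8_rst/
```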
20 | 21 | The tool searches for `sourcecode`, `code-block` and `ipython` blocks, crops them out and runs flake8 on them: 22 | 23 | ```text 24 | .. sourcecode:: python 25 | 26 | class Example(Base): 27 | pass 28 | ``` 29 | or 30 | 31 | ```text 32 | .. code-block:: python 33 | 34 | class Example(Base): 35 | pass 36 | ``` 37 | 38 | All flake8 arguments and flags are supported, except jobs (temporarily), plus one additional option: 39 | ```commandline 40 | flake8-rst --bootstrap "import test" 41 | ``` 42 | 43 | flake8-rst bootstraps code snippets with this code, which is useful for fixing import errors. 44 | Configuration is loaded from `[flake8-rst]` ini sections, just like flake8. 45 | 46 | ## Advanced Usage 47 | 48 | Custom Roles 49 | ------------ 50 | 51 | To use the custom roles of `flake8-rst` in documentation built with `Sphinx`, extend Sphinx with `flake8_rst.sphinxext.custom_roles` in `conf.py`. 52 | The roles have no effect on the generated documentation. 53 | 54 | ```python 55 | extensions = [..., 56 | 'flake8_rst.sphinxext.custom_roles' 57 | ] 58 | ``` 59 | 60 | | role | description | example | 61 | |-----------------------|-----------------------------------------------------------|--------------------------------------------| 62 | | `:flake8-group:` | Blocks with the same group are combined into one. | `:flake8-group: Group1` | 63 | | | Blocks with group `None` are checked individually. | `:flake8-group: None` | 64 | | | Blocks with group `Ignore` are not checked. | `:flake8-group: Ignore` | 65 | | `:flake8-set-ignore:` | Overwrites the ignore list for the current block. | `:flake8-set-ignore: F821, E999` | 66 | | `:flake8-add-ignore:` | Adds arguments to the ignore list for the current block. | `:flake8-add-ignore: E999` | 67 | | `:flake8-set-select:` | Overwrites the select list for the current block. | `:flake8-set-select: E, F` | 68 | | `:flake8-add-select:` | Adds arguments to the select list for the current block. | `:flake8-add-select: C404` | 69 | | `:flake8-bootstrap:` | Overwrites `--bootstrap` for the current block. | `:flake8-bootstrap: import os; import sys` | 70 | 71 | Keep in mind: 72 | * Roles added to blocks within the same group (except group `None`) have no effect unless they appear in the first block. 73 | * Provided bootstrap code is split on `; ` into individual lines. 74 | * `E999 SyntaxError: invalid syntax` causes `flake8` to skip `AST` checks. Keep blocks with unavoidable `E999` issues in 75 | `:flake8-group: Ignore` to preserve full testing for the rest of the blocks. 76 | 77 | Default block naming 78 | -------------------- 79 | You can specify default group names for all directives individually: 80 | 81 | ```commandline 82 | flake8-rst --default-groupnames '<file-pattern>-><directive>: <groupname>' 83 | ``` 84 | 85 | `file-pattern` and `directive` are matched by `Unix filename pattern matching` in the order of appearance. 86 | 87 | The default is `*.rst->*: default`, so all blocks in `*.rst` files are merged, while blocks in 88 | other files stay individual. 89 | 90 | It is also possible to merge only `ipython` directives in `*.rst` files and leave other directives 91 | treated individually: `"*.rst->ipython: default"` 92 | 93 | Examples: 94 | 95 | ```commandline 96 | flake8-rst --default-groupnames "*.rst->*: default" 97 | ``` 98 | 99 | ```buildoutcfg 100 | [flake8-rst] 101 | default-groupnames = 102 | *.rst->*: default 103 | *.py->code-block: default 104 | ``` 105 | 106 | ------------------------------------------------------------------------------------------------------------------------ 107 | 108 | Disconnected blocks don't know previously defined names: 109 | 110 | ```text 111 | .. 
code-block:: python 112 | 113 | class Example(Base): 114 | pass 115 | 116 | .. code-block:: python 117 | 118 | import datetime 119 | 120 | obj = Example(datetime.datetime.now()) # F821 undefined name 'Example' 121 | 122 | ``` 123 | 124 | Once blocks are connected, different issues are found: 125 | 126 | ```text 127 | .. code-block:: python 128 | :flake8-group: ExampleGroup 129 | 130 | class Example(Base): 131 | pass 132 | 133 | .. code-block:: python 134 | :flake8-group: ExampleGroup 135 | 136 | import datetime # E402 module level import not at top of file 137 | 138 | obj = Example(datetime.datetime.now()) 139 | 140 | ``` 141 | 142 | If appropriate, issues can be ignored for a specific group: 143 | 144 | ```text 145 | 146 | 147 | .. code-block:: python 148 | :flake8-group: ExampleGroup1 149 | :flake8-set-ignore: E402 150 | 151 | class Example(Base): 152 | pass 153 | 154 | .. code-block:: python 155 | :flake8-group: ExampleGroup1 156 | 157 | import datetime 158 | 159 | obj = Example(datetime.datetime.now()) 160 | 161 | 162 | 163 | .. code-block:: python 164 | :flake8-group: ExampleGroup2 165 | 166 | class Example(Base): 167 | pass 168 | 169 | .. code-block:: python 170 | :flake8-group: ExampleGroup2 171 | :flake8-set-ignore: E402 # no effect, because it's not defined in first 172 | # block of ExampleGroup2 173 | 174 | import datetime # E402 module level import not at top of file 175 | 176 | obj = Example(datetime.datetime.now()) 177 | 178 | 179 | ``` 180 | 181 | ## Example 182 | 183 | ```text 184 | d.kataev:flake8-rst§ flake8-rst --filename="*.py *.rst" tests/data/* --bootstrap="from sqlalchemy import Table, Column, Sequence, Integer, ForeignKey, String, DateTime" 185 | tests/data/test.py:14:42: F821 undefined name 'metadata' 186 | tests/data/test.py:15:13: E128 continuation line under-indented for visual indent 187 | tests/data/test.py:16:28: F821 undefined name 'JSONB' 188 | tests/data/test.py:19:14: F821 undefined name 'engine' 189 | tests/data/test.py:22:21: E251 unexpected spaces around keyword / parameter equals 190 | tests/data/test.py:22:23: E251 unexpected spaces around keyword / parameter equals 191 | tests/data/test.rst:27:48: F821 undefined name 'metadata' 192 | tests/data/test.rst:41:22: F821 undefined name 'meta' 193 | tests/data/test.rst:56:52: F821 undefined name 'meta' 194 | tests/data/test.rst:57:32: F821 undefined name 'meta' 195 | tests/data/test.rst:69:20: F821 undefined name 'Base' 196 | tests/data/test.rst:72:56: F821 undefined name 'Base' 197 | ``` 198 | -------------------------------------------------------------------------------- /flake8_rst/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = '0.8.0' 2 | -------------------------------------------------------------------------------- /flake8_rst/__main__.py: -------------------------------------------------------------------------------- 1 | """Module allowing for ``python -m flake8 ...``.""" 2 | from flake8_rst import cli 3 | 4 | cli.main() 5 | -------------------------------------------------------------------------------- /flake8_rst/application.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import time 3 | 4 | from flake8.main import options 5 | from flake8.main.application import Application as Flake8Application 6 | from flake8.options import manager 7 | 8 | from . import __version__ 9 | from . 
import checker 10 | 11 | 12 | class Application(Flake8Application): 13 | def __init__(self, program='flake8-rst', version=__version__): 14 | self.start_time = time.time() 15 | self.end_time = None # type: float 16 | self.program = program 17 | self.version = version 18 | self.prelim_arg_parser = argparse.ArgumentParser(add_help=False) 19 | options.register_preliminary_options(self.prelim_arg_parser) 20 | 21 | # super(Application, self).__init__(program, version) doesn't work 22 | # because flake8 has hardcoded 'flake8' in this code snippet: 23 | self.option_manager = manager.OptionManager( 24 | prog=program, 25 | version=version, 26 | parents=[self.prelim_arg_parser], 27 | ) 28 | options.register_default_options(self.option_manager) 29 | 30 | self.check_plugins = None # type: plugin_manager.Checkers 31 | self.formatting_plugins = None # type: plugin_manager.ReportFormatters 32 | self.formatter = None # type: BaseFormatter 33 | self.guide = None # type: style_guide.StyleGuideManager 34 | self.file_checker_manager = None # type: checker.Manager 35 | self.options = None # type: argparse.Namespace 36 | self.args = None # type: List[str] 37 | self.result_count = 0 38 | self.total_result_count = 0 39 | self.catastrophic_failure = False 40 | self.running_against_diff = False 41 | self.parsed_diff = {} # type: Dict[str, Set[int]] 42 | 43 | self.option_manager.add_option( 44 | '--bootstrap', default=None, parse_from_config=True, 45 | help='Bootstrap code snippets. Useful for add imports.', 46 | ) 47 | self.option_manager.add_option( 48 | '--default-groupnames', default="*.rst->*: default", parse_from_config=True, 49 | help='Set default group names.', type='string', 50 | ) 51 | 52 | def make_file_checker_manager(self): 53 | if self.file_checker_manager is None: 54 | self.file_checker_manager = checker.RstManager( 55 | style_guide=self.guide, 56 | arguments=self.args, 57 | checker_plugins=self.check_plugins, 58 | ) 59 | -------------------------------------------------------------------------------- /flake8_rst/checker.py: -------------------------------------------------------------------------------- 1 | import optparse 2 | 3 | from flake8.checker import FileChecker, Manager, LOG 4 | from flake8.processor import FileProcessor 5 | from flake8.style_guide import DecisionEngine 6 | from flake8 import exceptions 7 | 8 | from .rst import find_sourcecode 9 | 10 | ROLES = ['set-ignore', 'set-select', 'add-ignore', 'add-select'] 11 | 12 | 13 | class RstManager(Manager): 14 | 15 | def _job_count(self): 16 | return 0 17 | 18 | def make_checkers(self, paths=None): 19 | super(RstManager, self).make_checkers(paths) 20 | checkers = [] 21 | for checker in self.checkers: 22 | src = ''.join(checker.processor.read_lines()) 23 | for i, source_block in enumerate(find_sourcecode(checker.filename, checker.options, src)): 24 | checker = RstFileChecker.from_sourcecode( 25 | filename=checker.filename, checks=checker.checks, options=self.options, 26 | style_guide=self.style_guide, source_block=source_block 27 | ) 28 | checkers.append(checker) 29 | 30 | self.checkers = checkers 31 | self._all_checkers = checkers 32 | 33 | LOG.info('Checking %d blocks', len(self.checkers)) 34 | 35 | 36 | def inject_options(roles, options): 37 | new_options = optparse.Values(options.__dict__) 38 | for key in ('ignore', 'select'): 39 | 40 | if 'set-' + key in roles: 41 | values = [value.strip() for value in roles['set-' + key].split(',')] 42 | setattr(new_options, key, values) 43 | 44 | if 'add-' + key in roles: 45 | values = {value.strip() for 
value in roles['add-' + key].split(',')} 46 | values.update(new_options.__dict__[key]) 47 | setattr(new_options, key, list(values)) 48 | 49 | return new_options 50 | 51 | 52 | class RstFileChecker(FileChecker): 53 | def __init__(self, filename, checks, options, style_guide=None, source_block=None): 54 | self.style_guide = style_guide 55 | self.source_block = source_block 56 | 57 | if source_block: 58 | options = inject_options(source_block.roles, options) 59 | 60 | if self.style_guide: 61 | self.decider = DecisionEngine(options) 62 | 63 | super(RstFileChecker, self).__init__(filename, checks, options) 64 | 65 | @classmethod 66 | def from_sourcecode(cls, style_guide, source_block, **kwargs): 67 | return RstFileChecker(style_guide=style_guide, source_block=source_block, **kwargs) 68 | 69 | def _make_processor(self): 70 | content = self.source_block.complete_block if self.source_block else '' 71 | return FileProcessor(self.filename, self.options, lines=content.splitlines(True)) 72 | 73 | def report(self, error_code, line_number, column, text, line=None): 74 | try: 75 | line = self.source_block.get_code_line(line_number) 76 | if line['lineno'] == 0: 77 | return error_code 78 | 79 | if error_code is None: 80 | error_code, text = text.split(" ", 1) 81 | 82 | # If we're recovering from a problem in _make_processor, we will not 83 | # have this attribute. 84 | if hasattr(self, "processor"): 85 | try: 86 | self.processor.file_tokens 87 | source = self.processor.noqa_line_for(line_number) 88 | except exceptions.InvalidSyntax: 89 | source = line['source'] 90 | else: 91 | source = None 92 | 93 | if source: 94 | source = line['raw_source'][:line['indent']] + source 95 | 96 | self.results.append((error_code, line['lineno'], column + line['indent'], text, source)) 97 | return error_code 98 | except IndexError: 99 | return error_code 100 | 101 | def __getattribute__(self, name): 102 | if name == 'results' and self.style_guide: 103 | self.style_guide.decider = self.decider 104 | return super(RstFileChecker, self).__getattribute__(name) 105 | -------------------------------------------------------------------------------- /flake8_rst/cli.py: -------------------------------------------------------------------------------- 1 | """Command-line implementation of flake8.""" 2 | from flake8_rst import application 3 | 4 | 5 | def main(argv=None): 6 | # type: (Union[NoneType, List[str]]) -> NoneType 7 | """Execute the main bit of the application. 8 | 9 | This handles the creation of an instance of :class:`Application`, runs it, 10 | and then exits the application. 11 | 12 | :param list argv: 13 | The arguments to be passed to the application for parsing. 14 | """ 15 | app = application.Application() 16 | app.run(argv) 17 | app.exit() 18 | -------------------------------------------------------------------------------- /flake8_rst/rst.py: -------------------------------------------------------------------------------- 1 | import re 2 | from fnmatch import fnmatch 3 | from functools import wraps 4 | 5 | from .sourceblock import SourceBlock 6 | 7 | COMMENT_RE = re.compile(r'(#.*$)', re.MULTILINE) 8 | 9 | RST_RE = re.compile( 10 | r'(?P' 11 | r'^(?P *)\.\. 
(?Pcode-block|sourcecode|ipython)::( (?Pi?python|pycon))?\n' 12 | r'(?P(^(?P=indent) +:\S+:.*\n)*)' 13 | r'\n*' 14 | r')' 15 | r'(?P(^((?P=indent) {3} *.*)?\n)+(^(?P=indent) {3} *.*(\n)?))', 16 | re.MULTILINE, 17 | ) 18 | 19 | DOCSTRING_RE = re.compile( 20 | r'(?P\n?)' 21 | r'^(?P((?P *)r*\"{3}.*\n(?:(?:(?P=indent).+)?\n)*(?P=indent)\"{3}))', 22 | re.MULTILINE, 23 | ) 24 | 25 | 26 | def merge_by_group(func): 27 | 28 | @wraps(func) 29 | def func_wrapper(*args, **kwargs): 30 | blocks = {} 31 | for block in func(*args, **kwargs): 32 | group = block.roles['group'] 33 | if group == 'None': 34 | yield block 35 | elif group == 'Ignore': 36 | continue 37 | else: 38 | data = blocks.setdefault(group, []) 39 | data.append(block) 40 | for merge_blocks in blocks.values(): 41 | yield SourceBlock.merge(merge_blocks) 42 | 43 | return func_wrapper 44 | 45 | 46 | def apply_directive_specific_options(func): 47 | @wraps(func) 48 | def func_wrapper(*args, **kwargs): 49 | for block in func(*args, **kwargs): 50 | if block.directive == "ipython": 51 | previous = block.roles.setdefault('add-ignore', '') 52 | if previous: 53 | block.roles['add-ignore'] += ', ' + 'E302, E305' 54 | else: 55 | block.roles['add-ignore'] = 'E302, E305' 56 | yield block 57 | 58 | return func_wrapper 59 | 60 | 61 | def apply_default_groupnames(func): 62 | def resolve_mapping(mappings, pattern, split): 63 | for entry in mappings: 64 | key, values = entry.split(split, 1) 65 | if fnmatch(pattern, key.strip()): 66 | yield values.strip() 67 | 68 | @wraps(func) 69 | def func_wrapper(filename, options, *args, **kwargs): 70 | default_groupnames = re.sub(COMMENT_RE, '', options.default_groupnames) 71 | lines = default_groupnames.split(',' if ',' in default_groupnames else '\n') 72 | groupnames = list(resolve_mapping(lines, filename, '->')) 73 | 74 | for block in func(filename, options, *args, **kwargs): 75 | groupname = next(resolve_mapping(groupnames, block.directive, ':'), 'None') 76 | block.roles.setdefault('group', groupname) 77 | yield block 78 | 79 | return func_wrapper 80 | 81 | 82 | @apply_directive_specific_options 83 | @merge_by_group 84 | @apply_default_groupnames 85 | def find_sourcecode(filename, options, src): 86 | contains_python_code = filename.split('.')[-1].startswith('py') 87 | source = SourceBlock.from_source(options.bootstrap, src) 88 | source_blocks = source.find_blocks(DOCSTRING_RE) if contains_python_code else [source] 89 | 90 | for source_block in source_blocks: 91 | inner_blocks = source_block.find_blocks(RST_RE) 92 | found_inner_block = False 93 | for inner_block in inner_blocks: 94 | found_inner_block = True 95 | inner_block.clean() 96 | yield inner_block 97 | 98 | if not found_inner_block and source_block.clean_doctest(): 99 | source_block.clean() 100 | yield source_block 101 | -------------------------------------------------------------------------------- /flake8_rst/sourceblock.py: -------------------------------------------------------------------------------- 1 | import itertools 2 | import sys 3 | 4 | import doctest 5 | import operator 6 | import re 7 | 8 | try: 9 | if sys.version_info > (3, 5): 10 | import IPython.core.inputtransformer2 as ipt 11 | 12 | transform_manager = ipt.TransformerManager() 13 | transform_manager.cleanup_transforms.clear() 14 | transform_cell = transform_manager.transform_cell 15 | RUN_MAGIC_RE = re.compile(r"get_ipython\(\)\.run_line_magic\('(?:time(?:it)?)', (?P(['\"]))(.*)(?P=x)\)", 16 | re.MULTILINE) 17 | else: 18 | from IPython.core import inputsplitter as ipt 19 | 20 | transformer = 
ipt.IPythonInputSplitter() 21 | transform_cell = transformer.transform_cell 22 | RUN_MAGIC_RE = re.compile(r"get_ipython\(\)\.magic\(u(?P(['\"]))(?:time(?:it)?) (.*)(?P=x)\)", 23 | re.MULTILINE) 24 | except ImportError: 25 | ipt = transform_cell = None 26 | 27 | LINENO, SOURCE, RAW = range(3) 28 | 29 | ROLE_RE = re.compile(r':flake8-(?P\S*):\s?(?P.*)$', re.MULTILINE) 30 | 31 | INDENT_RE = re.compile(r'(?P^ *).', re.MULTILINE) 32 | 33 | DEFAULT_IGNORED_LINES = [re.compile(r'get_ipython\(\)|^@(savefig\s.*|ok(except|warning)|verbatim|doctest)$')] 34 | 35 | IPYTHON_START_RE = re.compile(r'In \[(?P\d+)\]:\s?(?P.*\n)') 36 | IPYTHON_FOLLOW_RE = re.compile(r'^\.{3}:\s?(?P.*\n)') 37 | 38 | ROLES = ['group', 'bootstrap'] 39 | 40 | 41 | def _match_default(match, group, default=None): 42 | try: 43 | return match.group(group) 44 | except IndexError: 45 | return default 46 | 47 | 48 | def _extract_roles(role_block): 49 | roles = {} 50 | if not role_block: 51 | return roles 52 | for match in ROLE_RE.finditer(role_block): 53 | roles[match.group('role')] = match.group('value').partition(' #')[0].strip() 54 | return roles 55 | 56 | 57 | class SourceBlock(object): 58 | @classmethod 59 | def from_source(cls, bootstrap, src, start_line=1, **kwargs): 60 | if bootstrap: 61 | boot_lines = SourceBlock.convert_bootstrap(bootstrap) 62 | else: 63 | boot_lines = [] 64 | code_lines = [(i, line, line) for i, line in enumerate(src.splitlines(True), start=start_line)] 65 | return cls(boot_lines, code_lines, **kwargs) 66 | 67 | @staticmethod 68 | def convert_bootstrap(bootstrap, split='\n'): 69 | return [(0, line + '\n', line + '\n') for line in bootstrap.split(split)] 70 | 71 | @classmethod 72 | def merge(cls, source_blocks): 73 | """Merge multiple SourceBlocks together""" 74 | 75 | if len(source_blocks) == 1: 76 | return source_blocks[0] 77 | 78 | source_blocks.sort(key=operator.attrgetter('start_line_number')) 79 | main_block = source_blocks[0] 80 | boot_lines = main_block.boot_lines 81 | source_lines = [source_line for source_block in source_blocks for source_line in source_block.source_lines] 82 | 83 | return cls(boot_lines, source_lines, directive=main_block.directive, 84 | language=main_block.language, roles=main_block.roles) 85 | 86 | def __init__(self, boot_lines, source_lines, directive='', language='', roles=None): 87 | self._boot_lines = boot_lines 88 | self._source_lines = source_lines 89 | self.directive = directive 90 | self.language = language 91 | self.roles = roles or {} 92 | 93 | if 'bootstrap' in self.roles: 94 | self._boot_lines = SourceBlock.convert_bootstrap(self.roles['bootstrap'], split='; ') 95 | 96 | @property 97 | def boot_lines(self): 98 | return self._boot_lines 99 | 100 | @property 101 | def source_lines(self): 102 | return self._source_lines 103 | 104 | @property 105 | def source_block(self): 106 | """Return code lines **without** bootstrap""" 107 | return "".join(line[SOURCE] for line in self._source_lines) 108 | 109 | @property 110 | def complete_block(self): 111 | """Return code lines **with** bootstrap""" 112 | return "".join(line[SOURCE] for line in self._boot_lines + self._source_lines) 113 | 114 | @property 115 | def start_line_number(self): 116 | return self._source_lines[0][LINENO] 117 | 118 | def get_code_line(self, lineno): 119 | all_lines = self._boot_lines + self._source_lines 120 | line = all_lines[lineno - 1] 121 | return {'lineno': line[LINENO], 'indent': len(line[RAW]) - len(line[SOURCE]), 122 | 'source': line[SOURCE], 'raw_source': line[RAW]} 123 | 124 | def 
find_blocks(self, expression): 125 | src = self.source_block 126 | for match in expression.finditer(src): 127 | origin_code = str(match.group('code')) 128 | line_start = src[:match.start()].count('\n') + match.group('before').count('\n') 129 | source_slice = slice(line_start, line_start + len(origin_code.splitlines(True))) 130 | directive = _match_default(match, 'directive', '') 131 | language = _match_default(match, 'language', '') 132 | roles = _extract_roles(_match_default(match, 'roles')) 133 | 134 | source_block = SourceBlock(self._boot_lines, self._source_lines[source_slice], directive=directive, 135 | language=language, roles=roles) 136 | source_block.remove_indentation() 137 | yield source_block 138 | 139 | def remove_indentation(self): 140 | indentation = min(INDENT_RE.findall(self.source_block)) 141 | if indentation: 142 | indent = len(indentation) 143 | source_lines = [(line[LINENO], line[SOURCE][indent:-1] + line[SOURCE][-1], line[RAW]) 144 | for line in self._source_lines] 145 | self._source_lines = source_lines 146 | 147 | def clean(self): 148 | for func in (self.clean_doctest, self.clean_ipython): 149 | if func(): 150 | break 151 | 152 | self.clean_console_syntax() 153 | self.clean_ignored_lines() 154 | 155 | def clean_doctest(self): 156 | try: 157 | lines = doctest.DocTestParser().get_examples(self.source_block) 158 | except ValueError: 159 | return None 160 | 161 | source_lines = [source_line for line in lines 162 | for source_line in self._overwritten_source(line.source, line.lineno)] 163 | 164 | if source_lines: 165 | self._source_lines = source_lines 166 | return True 167 | return False 168 | 169 | def clean_ipython(self): 170 | source_lines = [] 171 | src = '' 172 | lineno = follow = None 173 | for i, line in enumerate(self._source_lines): 174 | match = IPYTHON_START_RE.match(line[SOURCE]) 175 | if match: 176 | lineno = i if lineno is None else lineno 177 | follow = len(match.group('lineno')) + 2 178 | src += match.group('code') 179 | continue 180 | if not follow: 181 | continue 182 | match = IPYTHON_FOLLOW_RE.match(line[SOURCE][follow:]) 183 | if match: 184 | src += match.group('code') 185 | continue 186 | 187 | source_lines.extend(self._overwritten_source(src, lineno)) 188 | 189 | src = '' 190 | lineno = follow = None 191 | 192 | source_lines.extend(self._overwritten_source(src, lineno)) 193 | 194 | if source_lines: 195 | self._source_lines = source_lines 196 | return True 197 | return False 198 | 199 | def clean_console_syntax(self): 200 | if not transform_cell: 201 | return False 202 | block = self.source_block 203 | source_block = transform_cell(block) 204 | source_block = re.sub(RUN_MAGIC_RE, r'\3', source_block) 205 | 206 | if block != source_block: 207 | self._source_lines = list(self._overwritten_source(source_block)) 208 | return True 209 | return False 210 | 211 | def _overwritten_source(self, src, start_line=0): 212 | for line, (lineno, _, raw) in zip(src.splitlines(True), itertools.islice(self._source_lines, start_line, None)): 213 | if not line.startswith('get_ipython') and line not in raw: 214 | raise ValueError 215 | 216 | yield (lineno, line, raw) 217 | 218 | def clean_ignored_lines(self): 219 | for i, (_, source, _) in enumerate(self._source_lines): 220 | for pattern in DEFAULT_IGNORED_LINES: 221 | if pattern.match(source): 222 | self._source_lines.pop(i) 223 | -------------------------------------------------------------------------------- /flake8_rst/sphinxext/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/flake8-docs/flake8-rst/53ee9906661b001a6aecc06ce09cf093ce6e82df/flake8_rst/sphinxext/__init__.py -------------------------------------------------------------------------------- /flake8_rst/sphinxext/custom_roles.py: -------------------------------------------------------------------------------- 1 | from docutils.parsers.rst import directives 2 | from sphinx.directives.code import CodeBlock 3 | 4 | from ..sourceblock import ROLES as SOURCEBLOCK_ROLES 5 | from ..checker import ROLES as CHECKER_ROLES 6 | 7 | try: 8 | from IPython.sphinxext.ipython_directive import IPythonDirective 9 | except ImportError: 10 | IPythonDirective = None 11 | 12 | 13 | def add_custom_roles(directive_class): 14 | if not directive_class: 15 | return 16 | for role in SOURCEBLOCK_ROLES + CHECKER_ROLES: 17 | directive_class.option_spec['flake8-' + role] = directives.unchanged 18 | 19 | 20 | def setup(app): 21 | add_custom_roles(CodeBlock) 22 | add_custom_roles(IPythonDirective) 23 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | requires = [ 4 | "flake8 >= 3.5.0", 5 | ] 6 | 7 | with open("README.md", "r") as fh: 8 | long_description = fh.read() 9 | 10 | 11 | setup( 12 | name='flake8-rst', 13 | version='0.8.0', 14 | license="MIT", 15 | packages=find_packages(), 16 | url='https://github.com/kataev/flake8-rst', 17 | author='Denis Kataev', 18 | author_email='denis.a.kataev@gmail.com', 19 | install_requires=requires, 20 | description='flake8 for code in rst files and docstrings', 21 | long_description=long_description, 22 | long_description_content_type="text/markdown", 23 | classifiers=( 24 | "Programming Language :: Python :: 3", 25 | "Programming Language :: Python :: 2", 26 | "License :: OSI Approved :: MIT License", 27 | "Operating System :: OS Independent", 28 | ), 29 | 30 | entry_points={ 31 | 'console_scripts': [ 32 | 'flake8-rst = flake8_rst.cli:main' 33 | ] 34 | } 35 | ) 36 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | import ast 4 | import sys 5 | 6 | try: 7 | import pathlib 8 | except ImportError: 9 | import pathlib2 as pathlib 10 | import pprint 11 | 12 | import pytest 13 | 14 | ROOT_DIR = pathlib.Path(__file__).parent 15 | DATA_DIR = ROOT_DIR / 'data' 16 | RESULT_DIR = ROOT_DIR / ('result_py%s' % (sys.version_info[0])) 17 | SUMMARY_DIR = ROOT_DIR / ('summary_py%s' % (sys.version_info[0])) 18 | 19 | 20 | @pytest.fixture() 21 | def data_dir(): 22 | return DATA_DIR 23 | 24 | 25 | def read_ast(self): 26 | with self.open() as f: 27 | return ast.literal_eval(f.read()) 28 | 29 | 30 | def write_ast(self, data): 31 | if sys.version_info[0] < 3: 32 | o = 'wb' 33 | else: 34 | o = 'w' 35 | 36 | with self.open(o) as f: 37 | pprint.pprint(data, stream=f, width=120) 38 | 39 | 40 | pathlib.Path.read_ast = read_ast 41 | pathlib.Path.write_ast = write_ast 42 | 43 | 44 | def pytest_addoption(parser): 45 | parser.addoption('--refresh', action='store_true', help='Refresh tests') 46 | 47 | 48 | def pytest_generate_tests(metafunc): 49 | files = {} 50 | if 'checker' in metafunc.fixturenames and 'result' in metafunc.fixturenames: 51 | optional_files = 
list(RESULT_DIR.glob('*')) 52 | parameterize = 'checker,result' 53 | result_prefix = 'result' 54 | elif 'summary' in metafunc.fixturenames and 'result' in metafunc.fixturenames: 55 | optional_files = list(SUMMARY_DIR.glob('*')) 56 | parameterize = 'summary,result' 57 | result_prefix = 'summary' 58 | else: 59 | optional_files = [] 60 | parameterize = None 61 | result_prefix = None 62 | 63 | for f in list(DATA_DIR.glob('*')) + optional_files: 64 | name, number = f.stem.split('_') 65 | data = files.setdefault(number, [None, None]) 66 | 67 | if name == 'example': 68 | i = 0 69 | elif name == result_prefix: 70 | i = 1 71 | else: 72 | raise ValueError('Not properly configured') 73 | 74 | data[i] = f 75 | 76 | if 'source' in metafunc.fixturenames: 77 | source = list(DATA_DIR.glob('*')) 78 | ids = [f.name for f in source] 79 | 80 | metafunc.parametrize('source', source, ids=ids) 81 | 82 | if parameterize: 83 | ids, values = zip(*files.items()) 84 | metafunc.parametrize(parameterize, values, ids=ids, indirect=True) 85 | -------------------------------------------------------------------------------- /tests/data/example_1.rst: -------------------------------------------------------------------------------- 1 | Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia 2 | deserunt mollit anim id est laborum. 3 | 4 | .. sourcecode:: pycon 5 | 6 | >>> # extract 100 LDA topics, using default parameters 7 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 8 | 9 | using distributed version with 4 workers 10 | running online LDA training, 100 topics, 1 passes over the supplied corpus of 3199665 documets 11 | -------------------------------------------------------------------------------- /tests/data/example_10.py: -------------------------------------------------------------------------------- 1 | """ 2 | Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia 3 | deserunt mollit anim id est laborum. 4 | 5 | >>> # extract 100 LDA topics, using default parameters 6 | >>> lda = LdaModel(corpus=mm, id2word=id2word, 7 | ... num_topics=100, distributed=distribution_required) 8 | Intermediate output 9 | 10 | .. code-block:: 11 | >>> # extract 100 LDA topics, using default parameters 12 | >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 13 | Final output 14 | 15 | Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia 16 | deserunt mollit anim id est laborum. 17 | """ 18 | -------------------------------------------------------------------------------- /tests/data/example_11.rst: -------------------------------------------------------------------------------- 1 | Tuples are heterogenous, ordered collections. 2 | 3 | Each element in a tuple is a value, and can be in multiple tuples and have multiple names (or no name) 4 | 5 | .. code-block:: ipython 6 | 7 | In [8]: name = 'Brian' 8 | 9 | In [9]: other = brian 10 | 11 | In [10]: %timeit a = (1, 2,name) # noqa: F821 12 | 13 | In [11]: b = (3, 4, other) 14 | 15 | In [12]: for i in range(3): 16 | ....: print(a[i] is b[i]) 17 | ....: 18 | 19 | Out[13]: False False True 20 | 21 | .. nextslide:: Lists vs. Tuples 22 | 23 | .. rst-class:: center large 24 | 25 | So Why Have Both? 26 | -------------------------------------------------------------------------------- /tests/data/example_12.rst: -------------------------------------------------------------------------------- 1 | .. 
ipython:: python 2 | :suppress: 3 | import os 4 | import sys 5 | 6 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 7 | 8 | 9 | Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia 10 | deserunt mollit anim id est laborum. 11 | 12 | .. ipython:: python 13 | import matplotlib.pyplot as plt 14 | 15 | @savefig plot_simple.png width=4in 16 | plt.plot([1, 2, 3, 4]); plt.ylabel('some numbers'); 17 | 18 | Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia 19 | deserunt mollit anim id est laborum. 20 | 21 | .. ipython:: python 22 | import numpy as np 23 | 24 | @savefig hist_simple.png width=4in 25 | hist(np.random.randn(10000), 100) 26 | -------------------------------------------------------------------------------- /tests/data/example_13.rst: -------------------------------------------------------------------------------- 1 | Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia 2 | deserunt mollit anim id est laborum. 3 | 4 | >>> # extract 100 LDA topics, using default parameters 5 | >>> lda = LdaModel(corpus=mm, id2word=id2word, 6 | ... num_topics=100, distributed=distribution_required) 7 | Intermediate output 8 | >>> # extract 100 LDA topics, using default parameters 9 | >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 10 | Final output 11 | 12 | Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia 13 | deserunt mollit anim id est laborum. -------------------------------------------------------------------------------- /tests/data/example_14.py: -------------------------------------------------------------------------------- 1 | def apply(): 2 | r""" 3 | Lorem ipsum dolor sit amet, consectetur adipiscing elit, 4 | deserunt mollit anim id est laborum. 5 | 6 | >>> x = 10 7 | >>> e3 = x < 1 8 | 9 | using distributed version with 4 workers 10 | running online LDA training, 100 topics, 1 passes over the supplied corpus of 3199665 documets, 11 | updating model once every 40000 documents 12 | .. 13 | 14 | Some another text 15 | """ 16 | 17 | some_field = 1 18 | -------------------------------------------------------------------------------- /tests/data/example_2.py: -------------------------------------------------------------------------------- 1 | class Test: 2 | """ 3 | Lorem ipsum dolor sit amet, consectetur adipiscing elit, 4 | deserunt mollit anim id est laborum. 5 | 6 | .. sourcecode:: pycon 7 | 8 | >>> # extract 100 LDA topics, using default parameters 9 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 10 | 11 | using distributed version with 4 workers 12 | running online LDA training, 100 topics, 1 passes over the supplied corpus of 3199665 documets, 13 | updating model once every 40000 documents 14 | .. 15 | 16 | Some another text 17 | """ 18 | 19 | some_field = 1 20 | -------------------------------------------------------------------------------- /tests/data/example_3.py: -------------------------------------------------------------------------------- 1 | class Test: 2 | 3 | some_field = 1 4 | 5 | def test(self): 6 | """ 7 | Lorem ipsum dolor sit amet, consectetur adipiscing elit, 8 | deserunt mollit anim id est laborum. 9 | 10 | .. 
sourcecode:: pycon 11 | 12 | >>> # extract 100 LDA topics, using default parameters 13 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 14 | 15 | using distributed version with 4 workers 16 | running online LDA training, 100 topics, 1 passes over the supplied corpus of 3199665 documets, 17 | updating model once every 40000 documents 18 | .. 19 | 20 | Some another text 21 | """ 22 | 23 | return 1 24 | -------------------------------------------------------------------------------- /tests/data/example_4.py: -------------------------------------------------------------------------------- 1 | # SOME CODE 2 | 3 | """Author-topic model. 4 | 5 | The model was introduced by `Rosen-Zvi and co-authors: "The Author-Topic Model for Authors and Documents" 6 | `_. The model correlates the authorship information with the topics to give a better 7 | insight on the subject knowledge of an author. 8 | 9 | Example 10 | ------- 11 | 12 | .. sourcecode:: pycon 13 | 14 | >>> from gensim.models import AuthorTopicModel 15 | >>> from gensim.corpora import mmcorpus 16 | >>> from gensim.test.utils import common_dictionary, datapath, temporary_file 17 | >>> author2doc = { 18 | ... 'john': [0, 1, 2, 3, 4, 5, 6], 19 | ... 'jane': [2, 3, 4, 5, 6, 7, 8], 20 | ... 'jack': [0, 2, 4, 6, 8] 21 | ... } 22 | >>> 23 | 24 | """ 25 | -------------------------------------------------------------------------------- /tests/data/example_5.py: -------------------------------------------------------------------------------- 1 | class JSONB: 2 | """Represent the PostgreSQL JSONB type. 3 | 4 | The :class:`.JSONB` type stores arbitrary JSONB format data, e.g.: 5 | 6 | .. sourcecode:: python 7 | 8 | data_table = Table('data_table', metadata, 9 | Column('id', Integer, primary_key=True), 10 | Column('data', JSONB) 11 | ) 12 | 13 | with engine.connect() as conn: 14 | conn.execute( 15 | data_table.insert(), 16 | data = {"key1": "value1", "key2": "value2"} 17 | ) 18 | 19 | The :class:`.JSONB` type includ 20 | """ 21 | -------------------------------------------------------------------------------- /tests/data/example_6.py: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"). You 4 | # may not use this file except in compliance with the License. A copy of 5 | # the License is located at 6 | # 7 | # http://aws.amazon.com/apache2.0/ 8 | # 9 | # or in the "license" file accompanying this file. This file is 10 | # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF 11 | # ANY KIND, either express or implied. See the License for the specific 12 | # language governing permissions and limitations under the License. 13 | """Abstractions over S3's upload/download operations. 14 | 15 | This module provides high level abstractions for efficient 16 | uploads/downloads. It handles several things for the user: 17 | 18 | * Automatically switching to multipart transfers when 19 | a file is over a specific size threshold 20 | * Uploading/downloading a file in parallel 21 | * Progress callbacks to monitor transfers 22 | * Retries. While botocore handles retries for streaming uploads, 23 | it is not possible for it to handle retries for streaming 24 | downloads. This module handles retries for both cases so 25 | you don't need to implement any retry logic yourself. 26 | 27 | This module has a reasonable set of defaults. 
It also allows you 28 | to configure many aspects of the transfer process including: 29 | 30 | * Multipart threshold size 31 | * Max parallel downloads 32 | * Socket timeouts 33 | * Retry amounts 34 | 35 | There is no support for s3->s3 multipart copies at this 36 | time. 37 | 38 | 39 | .. _ref_s3transfer_usage: 40 | 41 | Usage 42 | ===== 43 | 44 | The simplest way to use this module is: 45 | 46 | .. code-block:: python 47 | 48 | client = boto3.client('s3', 'us-west-2') 49 | transfer = S3Transfer(client) 50 | # Upload /tmp/myfile to s3://bucket/key 51 | transfer.upload_file('/tmp/myfile', 'bucket', 'key') 52 | 53 | # Download s3://bucket/key to /tmp/myfile 54 | transfer.download_file('bucket', 'key', '/tmp/myfile') 55 | 56 | The ``upload_file`` and ``download_file`` methods also accept 57 | ``**kwargs``, which will be forwarded through to the corresponding 58 | client operation. Here are a few examples using ``upload_file``:: 59 | 60 | # Making the object public 61 | transfer.upload_file('/tmp/myfile', 'bucket', 'key', 62 | extra_args={'ACL': 'public-read'}) 63 | 64 | # Setting metadata 65 | transfer.upload_file('/tmp/myfile', 'bucket', 'key', 66 | extra_args={'Metadata': {'a': 'b', 'c': 'd'}}) 67 | 68 | # Setting content type 69 | transfer.upload_file('/tmp/myfile.json', 'bucket', 'key', 70 | extra_args={'ContentType': "application/json"}) 71 | 72 | 73 | The ``S3Transfer`` class also supports progress callbacks so you can 74 | provide transfer progress to users. Both the ``upload_file`` and 75 | ``download_file`` methods take an optional ``callback`` parameter. 76 | Here's an example of how to print a simple progress percentage 77 | to the user: 78 | 79 | .. code-block:: python 80 | 81 | class ProgressPercentage(object): 82 | def __init__(self, filename): 83 | self._filename = filename 84 | self._size = float(os.path.getsize(filename)) 85 | self._seen_so_far = 0 86 | self._lock = threading.Lock() 87 | 88 | def __call__(self, bytes_amount): 89 | # To simplify we'll assume this is hooked up 90 | # to a single filename. 91 | with self._lock: 92 | self._seen_so_far += bytes_amount 93 | percentage = (self._seen_so_far / self._size) * 100 94 | sys.stdout.write( 95 | "\r%s %s / %s (%.2f%%)" % ( 96 | self._filename, self._seen_so_far, self._size, 97 | percentage)) 98 | sys.stdout.flush() 99 | 100 | 101 | transfer = S3Transfer(boto3.client('s3', 'us-west-2')) 102 | # Upload /tmp/myfile to s3://bucket/key and print upload progress. 103 | transfer.upload_file('/tmp/myfile', 'bucket', 'key', 104 | callback=ProgressPercentage('/tmp/myfile')) 105 | 106 | 107 | 108 | You can also provide a TransferConfig object to the S3Transfer 109 | object that gives you more fine grained control over the 110 | transfer. For example: 111 | 112 | .. 
code-block:: python 113 | 114 | client = boto3.client('s3', 'us-west-2') 115 | config = TransferConfig( 116 | multipart_threshold=8 * 1024 * 1024, 117 | max_concurrency=10, 118 | num_download_attempts=10, 119 | ) 120 | transfer = S3Transfer(client, config) 121 | transfer.upload_file('/tmp/foo', 'bucket', 'key') 122 | 123 | 124 | """ 125 | from botocore.exceptions import ClientError 126 | from botocore.compat import six 127 | from s3transfer.exceptions import RetriesExceededError as \ 128 | S3TransferRetriesExceededError 129 | from s3transfer.manager import TransferConfig as S3TransferConfig 130 | from s3transfer.manager import TransferManager 131 | from s3transfer.futures import NonThreadedExecutor 132 | from s3transfer.subscribers import BaseSubscriber 133 | from s3transfer.utils import OSUtils 134 | 135 | from boto3.exceptions import RetriesExceededError, S3UploadFailedError 136 | 137 | 138 | KB = 1024 139 | MB = KB * KB 140 | 141 | 142 | def create_transfer_manager(client, config, osutil=None): 143 | """Creates a transfer manager based on configuration 144 | 145 | :type client: boto3.client 146 | :param client: The S3 client to use 147 | 148 | :type config: boto3.s3.transfer.TransferConfig 149 | :param config: The transfer config to use 150 | 151 | :type osutil: s3transfer.utils.OSUtils 152 | :param osutil: The os utility to use 153 | 154 | :rtype: s3transfer.manager.TransferManager 155 | :returns: A transfer manager based on parameters provided 156 | """ 157 | executor_cls = None 158 | if not config.use_threads: 159 | executor_cls = NonThreadedExecutor 160 | return TransferManager(client, config, osutil, executor_cls) 161 | 162 | 163 | class TransferConfig(S3TransferConfig): 164 | ALIAS = { 165 | 'max_concurrency': 'max_request_concurrency', 166 | 'max_io_queue': 'max_io_queue_size' 167 | } 168 | 169 | def __init__(self, 170 | multipart_threshold=8 * MB, 171 | max_concurrency=10, 172 | multipart_chunksize=8 * MB, 173 | num_download_attempts=5, 174 | max_io_queue=100, 175 | io_chunksize=256 * KB, 176 | use_threads=True): 177 | """Configuration object for managed S3 transfers 178 | 179 | :param multipart_threshold: The transfer size threshold for which 180 | multipart uploads, downloads, and copies will automatically be 181 | triggered. 182 | 183 | :param max_concurrency: The maximum number of threads that will be 184 | making requests to perform a transfer. If ``use_threads`` is 185 | set to ``False``, the value provided is ignored as the transfer 186 | will only ever use the main thread. 187 | 188 | :param multipart_chunksize: The partition size of each part for a 189 | multipart transfer. 190 | 191 | :param num_download_attempts: The number of download attempts that 192 | will be retried upon errors with downloading an object in S3. 193 | Note that these retries account for errors that occur when 194 | streaming down the data from s3 (i.e. socket errors and read 195 | timeouts that occur after recieving an OK response from s3). 196 | Other retryable exceptions such as throttling errors and 5xx 197 | errors are already retried by botocore (this default is 5). This 198 | does not take into account the number of exceptions retried by 199 | botocore. 200 | 201 | :param max_io_queue: The maximum amount of read parts that can be 202 | queued in memory to be written for a download. The size of each 203 | of these read parts is at most the size of ``io_chunksize``. 204 | 205 | :param io_chunksize: The max size of each chunk in the io queue. 
206 | Currently, this is size used when ``read`` is called on the 207 | downloaded stream as well. 208 | 209 | :param use_threads: If True, threads will be used when performing 210 | S3 transfers. If False, no threads will be used in 211 | performing transfers: all logic will be ran in the main thread. 212 | """ 213 | super(TransferConfig, self).__init__( 214 | multipart_threshold=multipart_threshold, 215 | max_request_concurrency=max_concurrency, 216 | multipart_chunksize=multipart_chunksize, 217 | num_download_attempts=num_download_attempts, 218 | max_io_queue_size=max_io_queue, 219 | io_chunksize=io_chunksize, 220 | ) 221 | # Some of the argument names are not the same as the inherited 222 | # S3TransferConfig so we add aliases so you can still access the 223 | # old version of the names. 224 | for alias in self.ALIAS: 225 | setattr(self, alias, getattr(self, self.ALIAS[alias])) 226 | self.use_threads = use_threads 227 | 228 | def __setattr__(self, name, value): 229 | # If the alias name is used, make sure we set the name that it points 230 | # to as that is what actually is used in governing the TransferManager. 231 | if name in self.ALIAS: 232 | super(TransferConfig, self).__setattr__(self.ALIAS[name], value) 233 | # Always set the value of the actual name provided. 234 | super(TransferConfig, self).__setattr__(name, value) 235 | 236 | 237 | class S3Transfer(object): 238 | ALLOWED_DOWNLOAD_ARGS = TransferManager.ALLOWED_DOWNLOAD_ARGS 239 | ALLOWED_UPLOAD_ARGS = TransferManager.ALLOWED_UPLOAD_ARGS 240 | 241 | def __init__(self, client=None, config=None, osutil=None, manager=None): 242 | if not client and not manager: 243 | raise ValueError( 244 | 'Either a boto3.Client or s3transfer.manager.TransferManager ' 245 | 'must be provided' 246 | ) 247 | if manager and any([client, config, osutil]): 248 | raise ValueError( 249 | 'Manager cannot be provided with client, config, ' 250 | 'nor osutil. These parameters are mutually exclusive.' 251 | ) 252 | if config is None: 253 | config = TransferConfig() 254 | if osutil is None: 255 | osutil = OSUtils() 256 | if manager: 257 | self._manager = manager 258 | else: 259 | self._manager = create_transfer_manager(client, config, osutil) 260 | 261 | def upload_file(self, filename, bucket, key, 262 | callback=None, extra_args=None): 263 | """Upload a file to an S3 object. 264 | 265 | Variants have also been injected into S3 client, Bucket and Object. 266 | You don't have to use S3Transfer.upload_file() directly. 267 | 268 | .. seealso:: 269 | :py:meth:`S3.Client.upload_file` 270 | :py:meth:`S3.Client.upload_fileobj` 271 | """ 272 | if not isinstance(filename, six.string_types): 273 | raise ValueError('Filename must be a string') 274 | 275 | subscribers = self._get_subscribers(callback) 276 | future = self._manager.upload( 277 | filename, bucket, key, extra_args, subscribers) 278 | try: 279 | future.result() 280 | # If a client error was raised, add the backwards compatibility layer 281 | # that raises a S3UploadFailedError. These specific errors were only 282 | # ever thrown for upload_parts but now can be thrown for any related 283 | # client error. 284 | except ClientError as e: 285 | raise S3UploadFailedError( 286 | "Failed to upload %s to %s: %s" % ( 287 | filename, '/'.join([bucket, key]), e)) 288 | 289 | def download_file(self, bucket, key, filename, extra_args=None, 290 | callback=None): 291 | """Download an S3 object to a file. 292 | 293 | Variants have also been injected into S3 client, Bucket and Object. 
294 | You don't have to use S3Transfer.download_file() directly. 295 | 296 | .. seealso:: 297 | :py:meth:`S3.Client.download_file` 298 | :py:meth:`S3.Client.download_fileobj` 299 | """ 300 | if not isinstance(filename, six.string_types): 301 | raise ValueError('Filename must be a string') 302 | 303 | subscribers = self._get_subscribers(callback) 304 | future = self._manager.download( 305 | bucket, key, filename, extra_args, subscribers) 306 | try: 307 | future.result() 308 | # This is for backwards compatibility where when retries are 309 | # exceeded we need to throw the same error from boto3 instead of 310 | # s3transfer's built in RetriesExceededError as current users are 311 | # catching the boto3 one instead of the s3transfer exception to do 312 | # their own retries. 313 | except S3TransferRetriesExceededError as e: 314 | raise RetriesExceededError(e.last_exception) 315 | 316 | def _get_subscribers(self, callback): 317 | if not callback: 318 | return None 319 | return [ProgressCallbackInvoker(callback)] 320 | 321 | def __enter__(self): 322 | return self 323 | 324 | def __exit__(self, *args): 325 | self._manager.__exit__(*args) 326 | 327 | 328 | class ProgressCallbackInvoker(BaseSubscriber): 329 | """A back-compat wrapper to invoke a provided callback via a subscriber 330 | 331 | :param callback: A callable that takes a single positional argument for 332 | how many bytes were transferred. 333 | """ 334 | def __init__(self, callback): 335 | self._callback = callback 336 | 337 | def on_progress(self, bytes_transferred, **kwargs): 338 | self._callback(bytes_transferred) 339 | -------------------------------------------------------------------------------- /tests/data/example_7.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | # 4 | # Copyright (C) 2018 RARE Technologies 5 | # Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html 6 | 7 | 8 | """Callbacks can be used to observe the training process. 9 | 10 | Since training in huge corpora can be time consuming, we want to offer the users some insight 11 | into the process, in real time. In this way, convergence issues 12 | or other potential problems can be identified early in the process, 13 | saving precious time and resources. 14 | 15 | The metrics exposed through this module can be used to construct Callbacks, which will be called 16 | at specific points in the training process, such as "epoch starts" or "epoch finished". 17 | These metrics can be used to assess mod's convergence or correctness, for example 18 | to save the model, visualize intermediate results, or anything else. 19 | 20 | Usage examples 21 | -------------- 22 | To implement a Callback, inherit from this base class and override one or more of its methods. 23 | 24 | Create a callback to save the training model after each epoch 25 | .. sourcecode:: pycon 26 | 27 | >>> from gensim.test.utils import get_tmpfile 28 | >>> from gensim.models.callbacks import CallbackAny2Vec 29 | >>> 30 | >>> 31 | >>> class EpochSaver(CallbackAny2Vec): 32 | ... '''Callback to save model after each epoch.''' 33 | ... 34 | ... def __init__(self, path_prefix): 35 | ... self.path_prefix = path_prefix 36 | ... self.epoch = 0 37 | ... 38 | ... def on_epoch_end(self, model): 39 | ... output_path = get_tmpfile('{}_epoch{}.model'.format(self.path_prefix, self.epoch)) 40 | ... model.save(output_path) 41 | ... self.epoch += 1 42 | ... 
43 | 44 | Create a 45 | """ 46 | -------------------------------------------------------------------------------- /tests/data/example_8.py: -------------------------------------------------------------------------------- 1 | class Test: 2 | 3 | def _cvc(self, i): 4 | """Check if b[j - 2: j + 1] makes the (consonant, vowel, consonant) pattern and also 5 | if the second 'c' is not 'w', 'x' or 'y'. This is used when trying to restore an 'e' at the end of a short word, 6 | e.g. cav(e), lov(e), hop(e), crim(e), but snow, box, tray. 7 | 8 | Parameters 9 | ---------- 10 | i : int 11 | Index for `b` 12 | 13 | Returns 14 | ------- 15 | bool 16 | 17 | Examples 18 | -------- 19 | .. sourcecode:: pycon 20 | 21 | >>> from gensim.parsing.porter import PorterStemmer 22 | >>> p = PorterStemmer() 23 | >>> p.b = "lib" 24 | >>> p.j = 2 25 | >>> p._cvc(2) 26 | True 27 | 28 | >>> from gensim.parsing.porter import PorterStemmer 29 | >>> p = PorterStemmer() 30 | >>> p.b = "dll" 31 | >>> p.j = 2 32 | >>> p._cvc(2) 33 | False 34 | 35 | >>> from gensim.parsing.porter import PorterStemmer 36 | >>> p = PorterStemmer() 37 | >>> p.b = "wow" 38 | >>> p.j = 2 39 | >>> p._cvc(2) 40 | False 41 | 42 | """ 43 | if i < 2 or not self._cons(i) or self._cons(i - 1) or not self._cons(i - 2): 44 | return False 45 | return self.b[i] not in "wxy" 46 | -------------------------------------------------------------------------------- /tests/data/example_9.py: -------------------------------------------------------------------------------- 1 | class JSONB: 2 | """Represent the PostgreSQL JSONB type. 3 | 4 | The :class:`.JSONB` type stores arbitrary JSONB format data, e.g.: 5 | 6 | .. sourcecode:: python 7 | 8 | kek =( 9 | 10 | The :class:`.JSONB` type includ 11 | """ 12 | -------------------------------------------------------------------------------- /tests/result_py2/result_1.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('F821', 3 | 7, 4 | 14, 5 | "undefined name 'LdaModel'", 6 | u' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 7 | ('F821', 8 | 7, 9 | 30, 10 | "undefined name 'mm'", 11 | u' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 12 | ('F821', 13 | 7, 14 | 42, 15 | "undefined name 'id2word'", 16 | u' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n')], 17 | {'logical lines': 1, 'physical lines': 1, 'tokens': 21}) 18 | -------------------------------------------------------------------------------- /tests/result_py2/result_10.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('F821', 3 | 12, 4 | 14, 5 | "undefined name 'LdbModel'", 6 | u' >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 7 | ('F821', 8 | 12, 9 | 30, 10 | "undefined name 'mm'", 11 | u' >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 12 | ('F821', 13 | 12, 14 | 42, 15 | "undefined name 'id2word'", 16 | u' >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n')], 17 | {'logical lines': 1, 'physical lines': 1, 'tokens': 21}) 18 | -------------------------------------------------------------------------------- /tests/result_py2/result_11.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('E111', 16, 15, 'indentation is not a multiple of four', u' ....: print(a[i] 
is b[i])\n'), 3 | (u'E231', 11, 29, u"missing whitespace after ','", u' In [10]: %timeit a = (1, 2,name) # noqa: F821\n'), 4 | ('F821', 9, 19, "undefined name 'brian'", u' In [9]: other = brian\n'), 5 | ('W391', 17, 11, 'blank line at end of file', u' ....:\n')], 6 | {'logical lines': 6, 'physical lines': 7, 'tokens': 53}) 7 | -------------------------------------------------------------------------------- /tests/result_py2/result_12.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('E402', 13, 4, 'module level import not at top of file', u' import matplotlib.pyplot as plt\n'), 3 | ('E402', 22, 4, 'module level import not at top of file', u' import numpy as np\n'), 4 | ('E501', 5 | 6, 6 | 83, 7 | 'line too long (82 > 80 characters)', 8 | u" sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n"), 9 | ('E702', 10 | 16, 11 | 26, 12 | 'multiple statements on one line (semicolon)', 13 | u" plt.plot([1, 2, 3, 4]); plt.ylabel('some numbers');\n"), 14 | ('E703', 16, 54, 'statement ends with a semicolon', u" plt.plot([1, 2, 3, 4]); plt.ylabel('some numbers');\n"), 15 | ('F821', 25, 4, "undefined name 'hist'", u' hist(np.random.randn(10000), 100)\n')], 16 | {'logical lines': 7, 'physical lines': 10, 'tokens': 92}) 17 | -------------------------------------------------------------------------------- /tests/result_py2/result_13.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('F821', 5, 10, "undefined name 'LdaModel'", u'>>> lda = LdaModel(corpus=mm, id2word=id2word,\n'), 3 | ('F821', 5, 26, "undefined name 'mm'", u'>>> lda = LdaModel(corpus=mm, id2word=id2word,\n'), 4 | ('F821', 5, 38, "undefined name 'id2word'", u'>>> lda = LdaModel(corpus=mm, id2word=id2word,\n'), 5 | ('F821', 6 | 6, 7 | 47, 8 | "undefined name 'distribution_required'", 9 | u'... 
num_topics=100, distributed=distribution_required)\n'), 10 | ('F821', 11 | 9, 12 | 10, 13 | "undefined name 'LdbModel'", 14 | u'>>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 15 | ('F821', 16 | 9, 17 | 26, 18 | "undefined name 'mm'", 19 | u'>>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 20 | ('F821', 21 | 9, 22 | 38, 23 | "undefined name 'id2word'", 24 | u'>>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n')], 25 | {'logical lines': 2, 'physical lines': 3, 'tokens': 43}) 26 | -------------------------------------------------------------------------------- /tests/result_py2/result_2.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('F821', 3 | 9, 4 | 18, 5 | "undefined name 'LdaModel'", 6 | u' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 7 | ('F821', 8 | 9, 9 | 34, 10 | "undefined name 'mm'", 11 | u' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 12 | ('F821', 13 | 9, 14 | 46, 15 | "undefined name 'id2word'", 16 | u' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n')], 17 | {'logical lines': 1, 'physical lines': 1, 'tokens': 21}) 18 | -------------------------------------------------------------------------------- /tests/result_py2/result_3.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('F821', 3 | 13, 4 | 22, 5 | "undefined name 'LdaModel'", 6 | u' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 7 | ('F821', 8 | 13, 9 | 38, 10 | "undefined name 'mm'", 11 | u' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 12 | ('F821', 13 | 13, 14 | 50, 15 | "undefined name 'id2word'", 16 | u' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n')], 17 | {'logical lines': 1, 'physical lines': 1, 'tokens': 21}) 18 | -------------------------------------------------------------------------------- /tests/result_py2/result_4.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('F401', 3 | 14, 4 | 8, 5 | "'gensim.models.AuthorTopicModel' imported but unused", 6 | u' >>> from gensim.models import AuthorTopicModel\n'), 7 | ('F401', 15, 8, "'gensim.corpora.mmcorpus' imported but unused", u' >>> from gensim.corpora import mmcorpus\n'), 8 | ('F401', 9 | 16, 10 | 8, 11 | "'gensim.test.utils.common_dictionary' imported but unused", 12 | u' >>> from gensim.test.utils import common_dictionary, datapath, temporary_file\n'), 13 | ('F401', 14 | 16, 15 | 8, 16 | "'gensim.test.utils.datapath' imported but unused", 17 | u' >>> from gensim.test.utils import common_dictionary, datapath, temporary_file\n'), 18 | ('F401', 19 | 16, 20 | 8, 21 | "'gensim.test.utils.temporary_file' imported but unused", 22 | u' >>> from gensim.test.utils import common_dictionary, datapath, temporary_file\n')], 23 | {'logical lines': 4, 'physical lines': 8, 'tokens': 85}) 24 | -------------------------------------------------------------------------------- /tests/result_py2/result_5.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('E128', 3 | 9, 4 | 12, 5 | 'continuation line under-indented for visual indent', 6 | u" Column('id', Integer, primary_key=True),\n"), 7 | 
('E251', 8 | 16, 9 | 20, 10 | 'unexpected spaces around keyword / parameter equals', 11 | u' data = {"key1": "value1", "key2": "value2"}\n'), 12 | ('E251', 13 | 16, 14 | 22, 15 | 'unexpected spaces around keyword / parameter equals', 16 | u' data = {"key1": "value1", "key2": "value2"}\n'), 17 | ('F821', 8, 21, "undefined name 'Table'", u" data_table = Table('data_table', metadata,\n"), 18 | ('F821', 8, 41, "undefined name 'metadata'", u" data_table = Table('data_table', metadata,\n"), 19 | ('F821', 9, 12, "undefined name 'Column'", u" Column('id', Integer, primary_key=True),\n"), 20 | ('F821', 9, 25, "undefined name 'Integer'", u" Column('id', Integer, primary_key=True),\n"), 21 | ('F821', 10, 12, "undefined name 'Column'", u" Column('data', JSONB)\n"), 22 | ('F821', 10, 27, "undefined name 'JSONB'", u" Column('data', JSONB)\n"), 23 | ('F821', 13, 13, "undefined name 'engine'", u' with engine.connect() as conn:\n')], 24 | {'logical lines': 3, 'physical lines': 10, 'tokens': 68}) 25 | -------------------------------------------------------------------------------- /tests/result_py2/result_6.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('F821', 48, 13, "undefined name 'boto3'", u" client = boto3.client('s3', 'us-west-2')\n"), 3 | ('F821', 49, 15, "undefined name 'S3Transfer'", u' transfer = S3Transfer(client)\n')], 4 | {'logical lines': 6, 'physical lines': 7, 'tokens': 45}) 5 | -------------------------------------------------------------------------------- /tests/result_py2/result_7.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('E302', 31, 8, 'expected 2 blank lines, found 0', u' >>> class EpochSaver(CallbackAny2Vec):\n'), 3 | ('E501', 4 | 39, 5 | 88, 6 | 'line too long (90 > 80 characters)', 7 | u" ... 
output_path = get_tmpfile('{}_epoch{}.model'.format(self.path_prefix, self.epoch))\n")], 8 | {'logical lines': 11, 'physical lines': 13, 'tokens': 94}) 9 | -------------------------------------------------------------------------------- /tests/result_py2/result_8.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('E402', 3 | 28, 4 | 16, 5 | 'module level import not at top of file', 6 | u' >>> from gensim.parsing.porter import PorterStemmer\n'), 7 | ('E402', 8 | 35, 9 | 16, 10 | 'module level import not at top of file', 11 | u' >>> from gensim.parsing.porter import PorterStemmer\n')], 12 | {'logical lines': 15, 'physical lines': 15, 'tokens': 102}) 13 | -------------------------------------------------------------------------------- /tests/result_py2/result_9.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('E902', 7, 0, 'TokenError: EOF in multi-line statement', u'\n'), 3 | ('E999', 8, 13, 'SyntaxError: invalid syntax', u' kek =(\n')], 4 | {'logical lines': 0, 'physical lines': 2, 'tokens': 5}) 5 | -------------------------------------------------------------------------------- /tests/result_py3/result_1.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('F821', 3 | 7, 4 | 14, 5 | "undefined name 'LdaModel'", 6 | ' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 7 | ('F821', 8 | 7, 9 | 30, 10 | "undefined name 'mm'", 11 | ' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 12 | ('F821', 13 | 7, 14 | 42, 15 | "undefined name 'id2word'", 16 | ' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n')], 17 | {'logical lines': 1, 'physical lines': 1, 'tokens': 21}) 18 | -------------------------------------------------------------------------------- /tests/result_py3/result_10.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('F821', 3 | 12, 4 | 14, 5 | "undefined name 'LdbModel'", 6 | ' >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 7 | ('F821', 8 | 12, 9 | 30, 10 | "undefined name 'mm'", 11 | ' >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 12 | ('F821', 13 | 12, 14 | 42, 15 | "undefined name 'id2word'", 16 | ' >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n')], 17 | {'logical lines': 1, 'physical lines': 1, 'tokens': 21}) 18 | -------------------------------------------------------------------------------- /tests/result_py3/result_11.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('E111', 16, 15, 'indentation is not a multiple of four', ' ....: print(a[i] is b[i])\n'), 3 | ('E231', 11, 29, "missing whitespace after ','", ' In [10]: %timeit a = (1, 2,name) # noqa: F821\n'), 4 | ('F821', 9, 19, "undefined name 'brian'", ' In [9]: other = brian\n'), 5 | ('W391', 17, 11, 'blank line at end of file', ' ....:\n')], 6 | {'logical lines': 6, 'physical lines': 7, 'tokens': 53}) 7 | -------------------------------------------------------------------------------- /tests/result_py3/result_12.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('E402', 13, 4, 'module level import not at top of file', ' import 
matplotlib.pyplot as plt\n'), 3 | ('E402', 22, 4, 'module level import not at top of file', ' import numpy as np\n'), 4 | ('E501', 5 | 6, 6 | 83, 7 | 'line too long (82 > 80 characters)', 8 | " sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))\n"), 9 | ('E702', 10 | 16, 11 | 26, 12 | 'multiple statements on one line (semicolon)', 13 | " plt.plot([1, 2, 3, 4]); plt.ylabel('some numbers');\n"), 14 | ('E703', 16, 54, 'statement ends with a semicolon', " plt.plot([1, 2, 3, 4]); plt.ylabel('some numbers');\n"), 15 | ('F821', 25, 4, "undefined name 'hist'", ' hist(np.random.randn(10000), 100)\n')], 16 | {'logical lines': 7, 'physical lines': 10, 'tokens': 92}) 17 | -------------------------------------------------------------------------------- /tests/result_py3/result_13.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('F821', 5, 10, "undefined name 'LdaModel'", '>>> lda = LdaModel(corpus=mm, id2word=id2word,\n'), 3 | ('F821', 5, 26, "undefined name 'mm'", '>>> lda = LdaModel(corpus=mm, id2word=id2word,\n'), 4 | ('F821', 5, 38, "undefined name 'id2word'", '>>> lda = LdaModel(corpus=mm, id2word=id2word,\n'), 5 | ('F821', 6 | 6, 7 | 47, 8 | "undefined name 'distribution_required'", 9 | '... num_topics=100, distributed=distribution_required)\n'), 10 | ('F821', 11 | 9, 12 | 10, 13 | "undefined name 'LdbModel'", 14 | '>>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 15 | ('F821', 16 | 9, 17 | 26, 18 | "undefined name 'mm'", 19 | '>>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 20 | ('F821', 21 | 9, 22 | 38, 23 | "undefined name 'id2word'", 24 | '>>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n')], 25 | {'logical lines': 2, 'physical lines': 3, 'tokens': 43}) 26 | -------------------------------------------------------------------------------- /tests/result_py3/result_14.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('E222', 7, 16, 'multiple spaces after operator', ' >>> e3 = x < 1\n')], 3 | {'logical lines': 2, 'physical lines': 2, 'tokens': 10}) 4 | -------------------------------------------------------------------------------- /tests/result_py3/result_2.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('F821', 3 | 9, 4 | 18, 5 | "undefined name 'LdaModel'", 6 | ' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 7 | ('F821', 8 | 9, 9 | 34, 10 | "undefined name 'mm'", 11 | ' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 12 | ('F821', 13 | 9, 14 | 46, 15 | "undefined name 'id2word'", 16 | ' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n')], 17 | {'logical lines': 1, 'physical lines': 1, 'tokens': 21}) 18 | -------------------------------------------------------------------------------- /tests/result_py3/result_3.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('F821', 3 | 13, 4 | 22, 5 | "undefined name 'LdaModel'", 6 | ' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 7 | ('F821', 8 | 13, 9 | 38, 10 | "undefined name 'mm'", 11 | ' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n'), 12 | ('F821', 13 | 13, 14 | 50, 15 | 
"undefined name 'id2word'", 16 | ' >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True)\n')], 17 | {'logical lines': 1, 'physical lines': 1, 'tokens': 21}) 18 | -------------------------------------------------------------------------------- /tests/result_py3/result_4.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('F401', 3 | 14, 4 | 8, 5 | "'gensim.models.AuthorTopicModel' imported but unused", 6 | ' >>> from gensim.models import AuthorTopicModel\n'), 7 | ('F401', 15, 8, "'gensim.corpora.mmcorpus' imported but unused", ' >>> from gensim.corpora import mmcorpus\n'), 8 | ('F401', 9 | 16, 10 | 8, 11 | "'gensim.test.utils.common_dictionary' imported but unused", 12 | ' >>> from gensim.test.utils import common_dictionary, datapath, temporary_file\n'), 13 | ('F401', 14 | 16, 15 | 8, 16 | "'gensim.test.utils.datapath' imported but unused", 17 | ' >>> from gensim.test.utils import common_dictionary, datapath, temporary_file\n'), 18 | ('F401', 19 | 16, 20 | 8, 21 | "'gensim.test.utils.temporary_file' imported but unused", 22 | ' >>> from gensim.test.utils import common_dictionary, datapath, temporary_file\n')], 23 | {'logical lines': 4, 'physical lines': 8, 'tokens': 85}) 24 | -------------------------------------------------------------------------------- /tests/result_py3/result_5.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('E128', 3 | 9, 4 | 12, 5 | 'continuation line under-indented for visual indent', 6 | " Column('id', Integer, primary_key=True),\n"), 7 | ('E251', 8 | 16, 9 | 20, 10 | 'unexpected spaces around keyword / parameter equals', 11 | ' data = {"key1": "value1", "key2": "value2"}\n'), 12 | ('E251', 13 | 16, 14 | 22, 15 | 'unexpected spaces around keyword / parameter equals', 16 | ' data = {"key1": "value1", "key2": "value2"}\n'), 17 | ('F821', 8, 21, "undefined name 'Table'", " data_table = Table('data_table', metadata,\n"), 18 | ('F821', 8, 41, "undefined name 'metadata'", " data_table = Table('data_table', metadata,\n"), 19 | ('F821', 9, 12, "undefined name 'Column'", " Column('id', Integer, primary_key=True),\n"), 20 | ('F821', 9, 25, "undefined name 'Integer'", " Column('id', Integer, primary_key=True),\n"), 21 | ('F821', 10, 12, "undefined name 'Column'", " Column('data', JSONB)\n"), 22 | ('F821', 10, 27, "undefined name 'JSONB'", " Column('data', JSONB)\n"), 23 | ('F821', 13, 13, "undefined name 'engine'", ' with engine.connect() as conn:\n')], 24 | {'logical lines': 3, 'physical lines': 10, 'tokens': 68}) 25 | -------------------------------------------------------------------------------- /tests/result_py3/result_6.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('F821', 48, 13, "undefined name 'boto3'", " client = boto3.client('s3', 'us-west-2')\n"), 3 | ('F821', 49, 15, "undefined name 'S3Transfer'", ' transfer = S3Transfer(client)\n')], 4 | {'logical lines': 6, 'physical lines': 7, 'tokens': 45}) 5 | -------------------------------------------------------------------------------- /tests/result_py3/result_7.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('E302', 31, 8, 'expected 2 blank lines, found 0', ' >>> class EpochSaver(CallbackAny2Vec):\n'), 3 | ('E501', 4 | 39, 5 | 88, 6 | 'line too long (90 > 80 characters)', 7 | " ... 
output_path = get_tmpfile('{}_epoch{}.model'.format(self.path_prefix, self.epoch))\n")], 8 | {'logical lines': 11, 'physical lines': 13, 'tokens': 94}) 9 | -------------------------------------------------------------------------------- /tests/result_py3/result_8.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('E402', 3 | 28, 4 | 16, 5 | 'module level import not at top of file', 6 | ' >>> from gensim.parsing.porter import PorterStemmer\n'), 7 | ('E402', 8 | 35, 9 | 16, 10 | 'module level import not at top of file', 11 | ' >>> from gensim.parsing.porter import PorterStemmer\n')], 12 | {'logical lines': 15, 'physical lines': 15, 'tokens': 102}) 13 | -------------------------------------------------------------------------------- /tests/result_py3/result_9.py: -------------------------------------------------------------------------------- 1 | ('test_precisely', 2 | [('E902', 7, 0, 'TokenError: EOF in multi-line statement', '\n')], 3 | {'logical lines': 0, 'physical lines': 2, 'tokens': 5}) 4 | -------------------------------------------------------------------------------- /tests/summary_py2/summary_1.txt: -------------------------------------------------------------------------------- 1 | ./data/example_1.rst:7:15: F821 undefined name 'LdaModel' 2 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 3 | ^ 4 | ./data/example_1.rst:7:31: F821 undefined name 'mm' 5 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 6 | ^ 7 | ./data/example_1.rst:7:43: F821 undefined name 'id2word' 8 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 9 | ^ 10 | -------------------------------------------------------------------------------- /tests/summary_py2/summary_10.txt: -------------------------------------------------------------------------------- 1 | ./data/example_10.py:12:15: F821 undefined name 'LdbModel' 2 | >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 3 | ^ 4 | ./data/example_10.py:12:31: F821 undefined name 'mm' 5 | >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 6 | ^ 7 | ./data/example_10.py:12:43: F821 undefined name 'id2word' 8 | >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 9 | ^ 10 | -------------------------------------------------------------------------------- /tests/summary_py2/summary_11.txt: -------------------------------------------------------------------------------- 1 | ./data/example_11.rst:11:30: E231 missing whitespace after ',' 2 | In [10]: %timeit a = (1, 2,name) # noqa: F821 3 | ^ 4 | ./data/example_11.rst:16:16: E111 indentation is not a multiple of four 5 | ....: print(a[i] is b[i]) 6 | ^ 7 | ./data/example_11.rst:17:12: W391 blank line at end of file 8 | ....: 9 | ^ 10 | ./data/example_11.rst:9:20: F821 undefined name 'brian' 11 | In [9]: other = brian 12 | ^ 13 | -------------------------------------------------------------------------------- /tests/summary_py2/summary_12.txt: -------------------------------------------------------------------------------- 1 | ./data/example_12.rst:13:5: E402 module level import not at top of file 2 | import matplotlib.pyplot as plt 3 | ^ 4 | ./data/example_12.rst:16:27: E702 multiple statements on one line (semicolon) 5 | plt.plot([1, 2, 3, 4]); plt.ylabel('some numbers'); 6 | ^ 7 | ./data/example_12.rst:16:55: E703 statement ends with a semicolon 8 | plt.plot([1, 2, 3, 4]); 
plt.ylabel('some numbers'); 9 | ^ 10 | ./data/example_12.rst:22:5: E402 module level import not at top of file 11 | import numpy as np 12 | ^ 13 | ./data/example_12.rst:25:5: F821 undefined name 'hist' 14 | hist(np.random.randn(10000), 100) 15 | ^ 16 | ./data/example_12.rst:6:83: E501 line too long (82 > 79 characters) 17 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 18 | ^ 19 | -------------------------------------------------------------------------------- /tests/summary_py2/summary_13.txt: -------------------------------------------------------------------------------- 1 | ./data/example_13.rst:5:11: F821 undefined name 'LdaModel' 2 | >>> lda = LdaModel(corpus=mm, id2word=id2word, 3 | ^ 4 | ./data/example_13.rst:5:27: F821 undefined name 'mm' 5 | >>> lda = LdaModel(corpus=mm, id2word=id2word, 6 | ^ 7 | ./data/example_13.rst:5:39: F821 undefined name 'id2word' 8 | >>> lda = LdaModel(corpus=mm, id2word=id2word, 9 | ^ 10 | ./data/example_13.rst:6:48: F821 undefined name 'distribution_required' 11 | ... num_topics=100, distributed=distribution_required) 12 | ^ 13 | ./data/example_13.rst:9:11: F821 undefined name 'LdbModel' 14 | >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 15 | ^ 16 | ./data/example_13.rst:9:27: F821 undefined name 'mm' 17 | >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 18 | ^ 19 | ./data/example_13.rst:9:39: F821 undefined name 'id2word' 20 | >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 21 | ^ 22 | -------------------------------------------------------------------------------- /tests/summary_py2/summary_2.txt: -------------------------------------------------------------------------------- 1 | ./data/example_2.py:9:19: F821 undefined name 'LdaModel' 2 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 3 | ^ 4 | ./data/example_2.py:9:35: F821 undefined name 'mm' 5 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 6 | ^ 7 | ./data/example_2.py:9:47: F821 undefined name 'id2word' 8 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 9 | ^ 10 | -------------------------------------------------------------------------------- /tests/summary_py2/summary_3.txt: -------------------------------------------------------------------------------- 1 | ./data/example_3.py:13:23: F821 undefined name 'LdaModel' 2 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 3 | ^ 4 | ./data/example_3.py:13:39: F821 undefined name 'mm' 5 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 6 | ^ 7 | ./data/example_3.py:13:51: F821 undefined name 'id2word' 8 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 9 | ^ 10 | -------------------------------------------------------------------------------- /tests/summary_py2/summary_4.txt: -------------------------------------------------------------------------------- 1 | ./data/example_4.py:14:9: F401 'gensim.models.AuthorTopicModel' imported but unused 2 | >>> from gensim.models import AuthorTopicModel 3 | ^ 4 | ./data/example_4.py:15:9: F401 'gensim.corpora.mmcorpus' imported but unused 5 | >>> from gensim.corpora import mmcorpus 6 | ^ 7 | ./data/example_4.py:16:9: F401 'gensim.test.utils.common_dictionary' imported but unused 8 | >>> from gensim.test.utils import common_dictionary, datapath, temporary_file 9 | ^ 10 | 
./data/example_4.py:16:9: F401 'gensim.test.utils.datapath' imported but unused 11 | >>> from gensim.test.utils import common_dictionary, datapath, temporary_file 12 | ^ 13 | ./data/example_4.py:16:9: F401 'gensim.test.utils.temporary_file' imported but unused 14 | >>> from gensim.test.utils import common_dictionary, datapath, temporary_file 15 | ^ 16 | -------------------------------------------------------------------------------- /tests/summary_py2/summary_5.txt: -------------------------------------------------------------------------------- 1 | ./data/example_5.py:10:13: F821 undefined name 'Column' 2 | Column('data', JSONB) 3 | ^ 4 | ./data/example_5.py:10:28: F821 undefined name 'JSONB' 5 | Column('data', JSONB) 6 | ^ 7 | ./data/example_5.py:13:14: F821 undefined name 'engine' 8 | with engine.connect() as conn: 9 | ^ 10 | ./data/example_5.py:16:21: E251 unexpected spaces around keyword / parameter equals 11 | data = {"key1": "value1", "key2": "value2"} 12 | ^ 13 | ./data/example_5.py:16:23: E251 unexpected spaces around keyword / parameter equals 14 | data = {"key1": "value1", "key2": "value2"} 15 | ^ 16 | ./data/example_5.py:8:22: F821 undefined name 'Table' 17 | data_table = Table('data_table', metadata, 18 | ^ 19 | ./data/example_5.py:8:42: F821 undefined name 'metadata' 20 | data_table = Table('data_table', metadata, 21 | ^ 22 | ./data/example_5.py:9:13: E128 continuation line under-indented for visual indent 23 | Column('id', Integer, primary_key=True), 24 | ^ 25 | ./data/example_5.py:9:13: F821 undefined name 'Column' 26 | Column('id', Integer, primary_key=True), 27 | ^ 28 | ./data/example_5.py:9:26: F821 undefined name 'Integer' 29 | Column('id', Integer, primary_key=True), 30 | ^ 31 | -------------------------------------------------------------------------------- /tests/summary_py2/summary_6.txt: -------------------------------------------------------------------------------- 1 | ./data/example_6.py:101:16: F821 undefined name 'S3Transfer' 2 | transfer = S3Transfer(boto3.client('s3', 'us-west-2')) 3 | ^ 4 | ./data/example_6.py:101:27: F821 undefined name 'boto3' 5 | transfer = S3Transfer(boto3.client('s3', 'us-west-2')) 6 | ^ 7 | ./data/example_6.py:114:14: F821 undefined name 'boto3' 8 | client = boto3.client('s3', 'us-west-2') 9 | ^ 10 | ./data/example_6.py:115:14: F821 undefined name 'TransferConfig' 11 | config = TransferConfig( 12 | ^ 13 | ./data/example_6.py:120:16: F821 undefined name 'S3Transfer' 14 | transfer = S3Transfer(client, config) 15 | ^ 16 | ./data/example_6.py:48:14: F821 undefined name 'boto3' 17 | client = boto3.client('s3', 'us-west-2') 18 | ^ 19 | ./data/example_6.py:49:16: F821 undefined name 'S3Transfer' 20 | transfer = S3Transfer(client) 21 | ^ 22 | ./data/example_6.py:84:32: F821 undefined name 'os' 23 | self._size = float(os.path.getsize(filename)) 24 | ^ 25 | ./data/example_6.py:86:26: F821 undefined name 'threading' 26 | self._lock = threading.Lock() 27 | ^ 28 | ./data/example_6.py:94:17: F821 undefined name 'sys' 29 | sys.stdout.write( 30 | ^ 31 | ./data/example_6.py:98:17: F821 undefined name 'sys' 32 | sys.stdout.flush() 33 | ^ 34 | -------------------------------------------------------------------------------- /tests/summary_py2/summary_7.txt: -------------------------------------------------------------------------------- 1 | ./data/example_7.py:31:9: E302 expected 2 blank lines, found 0 2 | >>> class EpochSaver(CallbackAny2Vec): 3 | ^ 4 | ./data/example_7.py:39:88: E501 line too long (90 > 79 characters) 5 | ... 
output_path = get_tmpfile('{}_epoch{}.model'.format(self.path_prefix, self.epoch)) 6 | ^ 7 | -------------------------------------------------------------------------------- /tests/summary_py2/summary_8.txt: -------------------------------------------------------------------------------- 1 | ./data/example_8.py:28:17: E402 module level import not at top of file 2 | >>> from gensim.parsing.porter import PorterStemmer 3 | ^ 4 | ./data/example_8.py:35:17: E402 module level import not at top of file 5 | >>> from gensim.parsing.porter import PorterStemmer 6 | ^ 7 | -------------------------------------------------------------------------------- /tests/summary_py2/summary_9.txt: -------------------------------------------------------------------------------- 1 | ./data/example_9.py:7:1: E902 TokenError: EOF in multi-line statement 2 | 3 | ^ 4 | -------------------------------------------------------------------------------- /tests/summary_py3/summary_1.txt: -------------------------------------------------------------------------------- 1 | ./data/example_1.rst:7:15: F821 undefined name 'LdaModel' 2 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 3 | ^ 4 | ./data/example_1.rst:7:31: F821 undefined name 'mm' 5 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 6 | ^ 7 | ./data/example_1.rst:7:43: F821 undefined name 'id2word' 8 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 9 | ^ 10 | -------------------------------------------------------------------------------- /tests/summary_py3/summary_10.txt: -------------------------------------------------------------------------------- 1 | ./data/example_10.py:12:15: F821 undefined name 'LdbModel' 2 | >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 3 | ^ 4 | ./data/example_10.py:12:31: F821 undefined name 'mm' 5 | >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 6 | ^ 7 | ./data/example_10.py:12:43: F821 undefined name 'id2word' 8 | >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 9 | ^ 10 | -------------------------------------------------------------------------------- /tests/summary_py3/summary_11.txt: -------------------------------------------------------------------------------- 1 | ./data/example_11.rst:11:30: E231 missing whitespace after ',' 2 | In [10]: %timeit a = (1, 2,name) # noqa: F821 3 | ^ 4 | ./data/example_11.rst:16:16: E111 indentation is not a multiple of four 5 | ....: print(a[i] is b[i]) 6 | ^ 7 | ./data/example_11.rst:17:12: W391 blank line at end of file 8 | ....: 9 | ^ 10 | ./data/example_11.rst:9:20: F821 undefined name 'brian' 11 | In [9]: other = brian 12 | ^ 13 | -------------------------------------------------------------------------------- /tests/summary_py3/summary_12.txt: -------------------------------------------------------------------------------- 1 | ./data/example_12.rst:13:5: E402 module level import not at top of file 2 | import matplotlib.pyplot as plt 3 | ^ 4 | ./data/example_12.rst:16:27: E702 multiple statements on one line (semicolon) 5 | plt.plot([1, 2, 3, 4]); plt.ylabel('some numbers'); 6 | ^ 7 | ./data/example_12.rst:16:55: E703 statement ends with a semicolon 8 | plt.plot([1, 2, 3, 4]); plt.ylabel('some numbers'); 9 | ^ 10 | ./data/example_12.rst:22:5: E402 module level import not at top of file 11 | import numpy as np 12 | ^ 13 | ./data/example_12.rst:25:5: F821 undefined name 'hist' 14 | 
hist(np.random.randn(10000), 100) 15 | ^ 16 | ./data/example_12.rst:6:83: E501 line too long (82 > 79 characters) 17 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))) 18 | ^ 19 | -------------------------------------------------------------------------------- /tests/summary_py3/summary_13.txt: -------------------------------------------------------------------------------- 1 | ./data/example_13.rst:5:11: F821 undefined name 'LdaModel' 2 | >>> lda = LdaModel(corpus=mm, id2word=id2word, 3 | ^ 4 | ./data/example_13.rst:5:27: F821 undefined name 'mm' 5 | >>> lda = LdaModel(corpus=mm, id2word=id2word, 6 | ^ 7 | ./data/example_13.rst:5:39: F821 undefined name 'id2word' 8 | >>> lda = LdaModel(corpus=mm, id2word=id2word, 9 | ^ 10 | ./data/example_13.rst:6:48: F821 undefined name 'distribution_required' 11 | ... num_topics=100, distributed=distribution_required) 12 | ^ 13 | ./data/example_13.rst:9:11: F821 undefined name 'LdbModel' 14 | >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 15 | ^ 16 | ./data/example_13.rst:9:27: F821 undefined name 'mm' 17 | >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 18 | ^ 19 | ./data/example_13.rst:9:39: F821 undefined name 'id2word' 20 | >>> ldb = LdbModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 21 | ^ 22 | -------------------------------------------------------------------------------- /tests/summary_py3/summary_14.txt: -------------------------------------------------------------------------------- 1 | ./data/example_14.py:7:17: E222 multiple spaces after operator 2 | >>> e3 = x < 1 3 | ^ 4 | -------------------------------------------------------------------------------- /tests/summary_py3/summary_2.txt: -------------------------------------------------------------------------------- 1 | ./data/example_2.py:9:19: F821 undefined name 'LdaModel' 2 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 3 | ^ 4 | ./data/example_2.py:9:35: F821 undefined name 'mm' 5 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 6 | ^ 7 | ./data/example_2.py:9:47: F821 undefined name 'id2word' 8 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 9 | ^ 10 | -------------------------------------------------------------------------------- /tests/summary_py3/summary_3.txt: -------------------------------------------------------------------------------- 1 | ./data/example_3.py:13:23: F821 undefined name 'LdaModel' 2 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 3 | ^ 4 | ./data/example_3.py:13:39: F821 undefined name 'mm' 5 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 6 | ^ 7 | ./data/example_3.py:13:51: F821 undefined name 'id2word' 8 | >>> lda = LdaModel(corpus=mm, id2word=id2word, num_topics=100, distributed=True) 9 | ^ 10 | -------------------------------------------------------------------------------- /tests/summary_py3/summary_4.txt: -------------------------------------------------------------------------------- 1 | ./data/example_4.py:14:9: F401 'gensim.models.AuthorTopicModel' imported but unused 2 | >>> from gensim.models import AuthorTopicModel 3 | ^ 4 | ./data/example_4.py:15:9: F401 'gensim.corpora.mmcorpus' imported but unused 5 | >>> from gensim.corpora import mmcorpus 6 | ^ 7 | ./data/example_4.py:16:9: F401 'gensim.test.utils.common_dictionary' imported but unused 8 | >>> from 
gensim.test.utils import common_dictionary, datapath, temporary_file 9 | ^ 10 | ./data/example_4.py:16:9: F401 'gensim.test.utils.datapath' imported but unused 11 | >>> from gensim.test.utils import common_dictionary, datapath, temporary_file 12 | ^ 13 | ./data/example_4.py:16:9: F401 'gensim.test.utils.temporary_file' imported but unused 14 | >>> from gensim.test.utils import common_dictionary, datapath, temporary_file 15 | ^ 16 | -------------------------------------------------------------------------------- /tests/summary_py3/summary_5.txt: -------------------------------------------------------------------------------- 1 | ./data/example_5.py:10:13: F821 undefined name 'Column' 2 | Column('data', JSONB) 3 | ^ 4 | ./data/example_5.py:10:28: F821 undefined name 'JSONB' 5 | Column('data', JSONB) 6 | ^ 7 | ./data/example_5.py:13:14: F821 undefined name 'engine' 8 | with engine.connect() as conn: 9 | ^ 10 | ./data/example_5.py:16:21: E251 unexpected spaces around keyword / parameter equals 11 | data = {"key1": "value1", "key2": "value2"} 12 | ^ 13 | ./data/example_5.py:16:23: E251 unexpected spaces around keyword / parameter equals 14 | data = {"key1": "value1", "key2": "value2"} 15 | ^ 16 | ./data/example_5.py:8:22: F821 undefined name 'Table' 17 | data_table = Table('data_table', metadata, 18 | ^ 19 | ./data/example_5.py:8:42: F821 undefined name 'metadata' 20 | data_table = Table('data_table', metadata, 21 | ^ 22 | ./data/example_5.py:9:13: E128 continuation line under-indented for visual indent 23 | Column('id', Integer, primary_key=True), 24 | ^ 25 | ./data/example_5.py:9:13: F821 undefined name 'Column' 26 | Column('id', Integer, primary_key=True), 27 | ^ 28 | ./data/example_5.py:9:26: F821 undefined name 'Integer' 29 | Column('id', Integer, primary_key=True), 30 | ^ 31 | -------------------------------------------------------------------------------- /tests/summary_py3/summary_6.txt: -------------------------------------------------------------------------------- 1 | ./data/example_6.py:101:16: F821 undefined name 'S3Transfer' 2 | transfer = S3Transfer(boto3.client('s3', 'us-west-2')) 3 | ^ 4 | ./data/example_6.py:101:27: F821 undefined name 'boto3' 5 | transfer = S3Transfer(boto3.client('s3', 'us-west-2')) 6 | ^ 7 | ./data/example_6.py:114:14: F821 undefined name 'boto3' 8 | client = boto3.client('s3', 'us-west-2') 9 | ^ 10 | ./data/example_6.py:115:14: F821 undefined name 'TransferConfig' 11 | config = TransferConfig( 12 | ^ 13 | ./data/example_6.py:120:16: F821 undefined name 'S3Transfer' 14 | transfer = S3Transfer(client, config) 15 | ^ 16 | ./data/example_6.py:48:14: F821 undefined name 'boto3' 17 | client = boto3.client('s3', 'us-west-2') 18 | ^ 19 | ./data/example_6.py:49:16: F821 undefined name 'S3Transfer' 20 | transfer = S3Transfer(client) 21 | ^ 22 | ./data/example_6.py:84:32: F821 undefined name 'os' 23 | self._size = float(os.path.getsize(filename)) 24 | ^ 25 | ./data/example_6.py:86:26: F821 undefined name 'threading' 26 | self._lock = threading.Lock() 27 | ^ 28 | ./data/example_6.py:94:17: F821 undefined name 'sys' 29 | sys.stdout.write( 30 | ^ 31 | ./data/example_6.py:98:17: F821 undefined name 'sys' 32 | sys.stdout.flush() 33 | ^ 34 | -------------------------------------------------------------------------------- /tests/summary_py3/summary_7.txt: -------------------------------------------------------------------------------- 1 | ./data/example_7.py:31:9: E302 expected 2 blank lines, found 0 2 | >>> class EpochSaver(CallbackAny2Vec): 3 | ^ 4 | 
./data/example_7.py:39:88: E501 line too long (90 > 79 characters) 5 | ... output_path = get_tmpfile('{}_epoch{}.model'.format(self.path_prefix, self.epoch)) 6 | ^ 7 | -------------------------------------------------------------------------------- /tests/summary_py3/summary_8.txt: -------------------------------------------------------------------------------- 1 | ./data/example_8.py:28:17: E402 module level import not at top of file 2 | >>> from gensim.parsing.porter import PorterStemmer 3 | ^ 4 | ./data/example_8.py:35:17: E402 module level import not at top of file 5 | >>> from gensim.parsing.porter import PorterStemmer 6 | ^ 7 | -------------------------------------------------------------------------------- /tests/summary_py3/summary_9.txt: -------------------------------------------------------------------------------- 1 | ./data/example_9.py:7:1: E902 TokenError: EOF in multi-line statement 2 | 3 | ^ 4 | -------------------------------------------------------------------------------- /tests/test.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/flake8-docs/flake8-rst/53ee9906661b001a6aecc06ce09cf093ce6e82df/tests/test.py -------------------------------------------------------------------------------- /tests/test_inject_options.py: -------------------------------------------------------------------------------- 1 | import optparse 2 | import pytest 3 | from hypothesis import given 4 | from hypothesis import strategies as st 5 | 6 | from flake8_rst.checker import inject_options, RstFileChecker 7 | 8 | 9 | @given(key=st.sampled_from(['ignore', 'select'])) 10 | @pytest.mark.parametrize('values, expected', [ 11 | ('F, E302, E304', ['F', 'E302', 'E304']), 12 | ('F821,L02,N', ['F821', 'L02', 'N'])]) 13 | def test_set_options(key, values, expected): 14 | options = optparse.Values({key: ['F821', 'E305']}) 15 | injected = inject_options({'set-' + key: values}, options) 16 | 17 | assert expected.sort() == injected.__dict__[key].sort() 18 | assert ['F821', 'E305'] == options.__dict__[key] 19 | 20 | 21 | @given(key=st.sampled_from(['ignore', 'select'])) 22 | @pytest.mark.parametrize('values, expected', [ 23 | ('F, E302, E304', ['F821', 'E305', 'F', 'E302', 'E304']), 24 | ('F821,L02,N', ['F821', 'E305', 'L02', 'N'])]) 25 | def test_add_options(key, values, expected): 26 | options = optparse.Values({key: ['F821', 'E305']}) 27 | injected = inject_options({'add-' + key: values}, options) 28 | 29 | assert expected.sort() == injected.__dict__[key].sort() 30 | assert ['F821', 'E305'] == options.__dict__[key] 31 | 32 | 33 | def test_selecting_decision_engine(mocker): 34 | style_guide = mocker.Mock(decider=mocker.Mock()) 35 | decider = mocker.Mock() 36 | options = mocker.Mock(max_line_length=80, verbose=0, hang_closing=False) 37 | 38 | checker = RstFileChecker('dummy.py', {}, options) 39 | checker.style_guide = style_guide 40 | checker.decider = decider 41 | 42 | checker.results 43 | 44 | assert decider is style_guide.__dict__['decider'] 45 | -------------------------------------------------------------------------------- /tests/test_precisely.py: -------------------------------------------------------------------------------- 1 | import tempfile 2 | 3 | import pytest 4 | 5 | from flake8_rst.sourceblock import SourceBlock 6 | 7 | 8 | @pytest.fixture() 9 | def options(mocker): 10 | return mocker.Mock(max_line_length=80, verbose=0, hang_closing=False, max_doc_length=100, 11 | ignore=[], bootstrap=None, default_groupnames='*.rst->*: 
default', 12 | disable_noqa=False) 13 | 14 | 15 | @pytest.fixture() 16 | def checks(): 17 | from flake8.plugins.manager import Checkers 18 | 19 | return Checkers() 20 | 21 | 22 | @pytest.fixture() 23 | def checker(request, options, checks): 24 | from flake8_rst.rst import find_sourcecode 25 | from flake8_rst.checker import RstFileChecker 26 | 27 | with request.param.open() as f: 28 | for code_block in find_sourcecode(str(request.param), options, f.read()): 29 | return RstFileChecker.from_sourcecode( 30 | filename=__name__, checks=checks.to_dictionary(), options=options, 31 | style_guide=None, source_block=code_block) 32 | 33 | 34 | @pytest.fixture() 35 | def summary(request, options, checks): 36 | from flake8_rst.application import Application 37 | with tempfile.NamedTemporaryFile() as file: 38 | application = Application() 39 | application.initialize(["--output-file={}".format(file.name), "--show-source"]) 40 | application.run_checks([str(request.param)]) 41 | application.report() 42 | return file.read().decode('utf-8') 43 | 44 | 45 | @pytest.fixture() 46 | def result(request): 47 | if not request.param or not request.param.exists(): 48 | return () 49 | 50 | return request.param 51 | 52 | 53 | def test_checker(request, checker, result): 54 | data = checker.run_checks() 55 | 56 | for obj in data: 57 | if isinstance(obj, list): 58 | obj.sort() 59 | 60 | if request.config.getoption('--refresh'): 61 | result.write_ast(data) 62 | 63 | assert data == result.read_ast() 64 | 65 | 66 | def test_summary(request, summary, result): 67 | path_to_data, _, _ = summary.partition('data') 68 | data = './'.join(sorted(summary.split(path_to_data))) 69 | 70 | if request.config.getoption('--refresh'): 71 | result.write_text(data) 72 | 73 | expected = result.read_text() 74 | assert data == expected 75 | 76 | 77 | def test_readline(source, checks, options): 78 | from flake8_rst.checker import RstFileChecker 79 | with source.open() as f: 80 | src = f.read() 81 | 82 | source_block = SourceBlock.from_source('', src) 83 | checker = RstFileChecker(str(source), checks, options, source_block=source_block) 84 | lines = checker.processor.read_lines() 85 | 86 | assert src == ''.join(lines) 87 | -------------------------------------------------------------------------------- /tests/test_source_block.py: -------------------------------------------------------------------------------- 1 | import doctest 2 | import optparse 3 | import pytest 4 | 5 | try: 6 | import pathlib 7 | except ImportError: 8 | import pathlib2 as pathlib 9 | 10 | from flake8_rst.rst import RST_RE, apply_default_groupnames, apply_directive_specific_options, merge_by_group 11 | from flake8_rst.sourceblock import SourceBlock, _extract_roles 12 | from hypothesis import assume, given, note, example 13 | from hypothesis import strategies as st 14 | 15 | ROOT_DIR = pathlib.Path(__file__).parent 16 | DATA_DIR = ROOT_DIR / 'data' 17 | 18 | code_strategy = st.characters(blacklist_categories=['Cc']) 19 | 20 | 21 | @given(code_strategy, code_strategy) 22 | def test_from_sourcecode(bootstrap, src): 23 | assume(bootstrap and src) 24 | 25 | code_block = SourceBlock.from_source(bootstrap, src) 26 | 27 | expected = '\n'.join([bootstrap, src]) 28 | result = code_block.complete_block 29 | 30 | assert result == expected 31 | 32 | 33 | @given(code_strategy) 34 | def test_get_correct_line(src): 35 | code_block = SourceBlock.from_source('', src) 36 | 37 | for line_number, line in enumerate(src.splitlines(True), start=1): 38 | code_line = code_block.get_code_line(line_number) 
39 | assert code_line['lineno'] == line_number 40 | assert code_line['source'] == line 41 | 42 | 43 | def test_find_block(): 44 | example = DATA_DIR / 'example_1.rst' 45 | src = example.open().read() 46 | 47 | code_block = SourceBlock.from_source('', src) 48 | 49 | for match, block in zip(RST_RE.finditer(src), code_block.find_blocks(RST_RE)): 50 | origin_code = match.group('code') 51 | origin_code = ''.join(map(lambda s: s.lstrip() + '\n', origin_code.splitlines())) 52 | assert block.source_block == origin_code 53 | 54 | 55 | def test_clean_doctest(): 56 | example = DATA_DIR / 'example_1.rst' 57 | src = example.open().read() 58 | 59 | code_block = SourceBlock.from_source('', src) 60 | 61 | for match, block in zip(RST_RE.finditer(src), code_block.find_blocks(RST_RE)): 62 | origin_code = match.group('code') 63 | origin_code = ''.join((line.source for line in doctest.DocTestParser().get_examples(origin_code))) 64 | 65 | assert block.clean_doctest() 66 | assert block.source_block == origin_code 67 | assert '>>>' not in origin_code 68 | 69 | 70 | @pytest.mark.parametrize('src, expected', [ 71 | (DATA_DIR / 'example_11.rst', "name = 'Brian'\nother = brian\n%timeit a = (1, 2,name) # noqa: F821\n" 72 | "b = (3, 4, other)\nfor i in range(3):\n print(a[i] is b[i])\n\n"), 73 | (".. ipython:: python\n In [4]: grouped = df.groupby('A')\n\n In [5]: for name, group in grouped:\n" 74 | " ...: print(name)\n ...: print(group)\n ...:\n", 75 | "grouped = df.groupby('A')\nfor name, group in grouped:\n print(name)\n print(group)\n\n") 76 | ]) 77 | def test_clean_ipython(src, expected): 78 | if isinstance(src, pathlib.Path): 79 | src = src.open().read() 80 | 81 | code_block = SourceBlock.from_source('', src) 82 | 83 | block = next(code_block.find_blocks(RST_RE)) 84 | 85 | assert block.clean_ipython() 86 | assert expected == block.source_block 87 | 88 | 89 | @pytest.mark.parametrize('src, expected', [ 90 | ('%timeit a = (1, 2,name)\n', 'a = (1, 2,name)\n'), 91 | ('%time a = (1, 2,name)\nb = (3, 4, other)\n', 'a = (1, 2,name)\nb = (3, 4, other)\n'), 92 | ("%time df = pd.read_csv('big.csv')\n", "df = pd.read_csv('big.csv')\n"), 93 | ('%time df = pd.read_csv("big.csv")\n', 'df = pd.read_csv("big.csv")\n'), 94 | ('%time df = pd.read_csv("big.csv")\n%time df = pd.read_csv(\'big.csv\')\n', 95 | 'df = pd.read_csv("big.csv")\ndf = pd.read_csv(\'big.csv\')\n'), 96 | ]) 97 | def test_clean_console_syntax(src, expected): 98 | block = SourceBlock.from_source('', src) 99 | 100 | block.clean_console_syntax() 101 | block.clean_ignored_lines() 102 | 103 | assert block.source_block == expected 104 | 105 | 106 | @pytest.mark.parametrize('src', [ 107 | '%prun -l 4 f(x)\n', 108 | '%%timeit x = range(10000)\nmax(x)\n', 109 | ]) 110 | def test_ignore_unrecognized_console_syntax(src): 111 | block = SourceBlock.from_source('', src) 112 | 113 | block.clean_console_syntax() 114 | block.clean_ignored_lines() 115 | 116 | assert not block.source_block 117 | 118 | 119 | @pytest.mark.parametrize('src, expected', [ 120 | ('@okexcept\na = (1, 2,name)\n', 'a = (1, 2,name)\n'), 121 | ('@savefig "picture.png"\na = (1, 2,name)\nb = (3, 4, other)\n', 'a = (1, 2,name)\nb = (3, 4, other)\n'), 122 | ]) 123 | def test_clean_ignored_lines(src, expected): 124 | block = SourceBlock.from_source('', src) 125 | 126 | block.clean_ignored_lines() 127 | 128 | assert block.source_block == expected 129 | 130 | 131 | @given(code_strategy, code_strategy, code_strategy) 132 | def test_merge_source_blocks(bootstrap, src_1, src_2): 133 | block1 = 
SourceBlock.from_source(bootstrap, src_1) 134 | block2 = SourceBlock.from_source(bootstrap, src_2, len(src_1.splitlines()) + 1) 135 | expected = SourceBlock.from_source(bootstrap, src_1 + src_2) 136 | 137 | merged = SourceBlock.merge([block1, block2]) 138 | reversed_merged = SourceBlock.merge([block1, block2]) 139 | 140 | assert merged.complete_block == expected.complete_block 141 | assert reversed_merged.complete_block == expected.complete_block 142 | 143 | 144 | @pytest.mark.parametrize("filename, directive, roles, default_groupnames, expected", [ 145 | ('test.rst', 'code-block', {}, "*.rst->*: default", {'group': 'default'}), 146 | ('test.py', 'code-block', {}, "*.rst->*: default", {'group': 'None'}), 147 | ('test.rst', 'code-block', {}, "*->code-block: code-block, *->ipython: ipython", {'group': 'code-block'}), 148 | ('test.rst', 'ipython', {}, "*->code-block: code-block, *->ipython: ipython", {'group': 'ipython'}), 149 | ('test.py', 'code-block', {}, "last.py->code-block: code-block, *.rst->ipython: ipython", {'group': 'None'}), 150 | ]) 151 | def test_default_groupname(filename, directive, roles, default_groupnames, expected): 152 | func = apply_default_groupnames(lambda *a, **k: [SourceBlock([], [], directive=directive, roles=roles)]) 153 | block = next(func(filename, options=optparse.Values(dict(default_groupnames=default_groupnames)))) 154 | 155 | assert block.roles == expected 156 | 157 | 158 | @pytest.mark.parametrize("directive, roles, expected", [ 159 | ('code-block', {}, {}), 160 | ('ipython', {}, {'add-ignore': 'E302, E305'}), 161 | ('ipython', {'add-ignore': 'F'}, {'add-ignore': 'F, E302, E305'}), 162 | ]) 163 | def test_directive_specific_options(directive, roles, expected): 164 | func = apply_directive_specific_options(lambda *a, **k: [SourceBlock([], [], directive=directive, roles=roles)]) 165 | block = next(func()) 166 | 167 | assert block.roles == expected 168 | 169 | 170 | @given(role=code_strategy, value=code_strategy, comment=code_strategy) 171 | @example(role='group', value='Group#4', comment='Within 4th group.') 172 | @pytest.mark.parametrize("string_format", [u' :flake8-{role}:{value}\n', 173 | u' :flake8-{role}:{value} #{comment}\n']) 174 | def test_roles(string_format, role, value, comment): 175 | assume(role.strip() and value.strip() and comment.strip()) 176 | role_string = string_format.format(role=role, value=value, comment=comment) 177 | note(role_string) 178 | roles = _extract_roles(role_string) 179 | 180 | assert value == roles[role] 181 | 182 | 183 | @pytest.mark.parametrize("group_names, expected", [ 184 | (['None', 'None'], ['None', 'None']), 185 | (['', ''], ['']), 186 | (['A', 'B', 'A'], ['A', 'B']), 187 | (['Ignore'], []), 188 | ]) 189 | def test_merge_by_group(group_names, expected): 190 | source_blocks = [SourceBlock([], [(0, '', '')], roles={'group': group}) for group in group_names] 191 | blocks = merge_by_group(lambda *a, **k: source_blocks)() 192 | result = sorted([block.roles['group'] for block in blocks]) 193 | 194 | assert result == expected 195 | 196 | 197 | @given(code_strategy, code_strategy, st.lists(code_strategy, min_size=1)) 198 | def test_inject_bootstrap_blocks(bootstrap, src, injected_bootstrap): 199 | note(injected_bootstrap) 200 | block = SourceBlock.from_source(bootstrap, src, roles={'bootstrap': '; '.join(injected_bootstrap)}) 201 | expected = SourceBlock.from_source('\n'.join(injected_bootstrap), src) 202 | 203 | assert block.complete_block == expected.complete_block 204 | 
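205 | # Note for readers of the role/group tests above: flake8-rst options are
206 | # embedded in the RST source as ':flake8-<role>:<value>' lines (a trailing
207 | # comment introduced by ' #' is ignored), e.g. ':flake8-group:Group#4', and
208 | # _extract_roles() turns each such line into roles[<role>] = <value>.
209 | # merge_by_group() then concatenates blocks that share a group name, keeps
210 | # blocks in the default 'None' group separate, and drops blocks whose group
211 | # is 'Ignore', as the parametrised cases in test_merge_by_group() suggest.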
-------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | minversion = 2.0 3 | envlist = 4 | check 5 | {py27,py36}-{linux} 6 | skipsdist = True 7 | platform = linux: linux 8 | 9 | [testenv] 10 | recreate = True 11 | deps = . 12 | pathlib2 13 | pytest 14 | pytest-mock 15 | hypothesis 16 | py36: ipython 17 | py27: ipython<=5.8.0 18 | 19 | commands = py.test 20 | 21 | [testenv:check] 22 | deps = 23 | check-manifest 24 | skip_install = true 25 | commands = 26 | check-manifest {toxinidir} --------------------------------------------------------------------------------
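How these fixtures fit together: each tests/data/example_N file has a matching parsed-report fixture in tests/result_py2 and tests/result_py3 and a rendered-report fixture in tests/summary_py2 and tests/summary_py3. The snippet below is a rough sketch of the flow that tests/test_precisely.py exercises to produce the result fixtures; it only reuses names visible in those tests (find_sourcecode, RstFileChecker.from_sourcecode, run_checks), the option set is copied from that module's ``options`` fixture and may not be complete, and none of this is a documented public API.

    import optparse

    from flake8.plugins.manager import Checkers        # flake8's check/plugin registry
    from flake8_rst.rst import find_sourcecode          # pulls code blocks out of RST / docstrings
    from flake8_rst.checker import RstFileChecker

    # Option values mirror the `options` fixture in tests/test_precisely.py;
    # a normal run would take these from flake8's own option parser instead.
    options = optparse.Values(dict(
        max_line_length=80, max_doc_length=100, verbose=0, hang_closing=False,
        ignore=[], bootstrap=None, disable_noqa=False,
        default_groupnames='*.rst->*: default',
    ))

    checks = Checkers().to_dictionary()
    filename = 'tests/data/example_1.rst'               # any fixture from tests/data

    with open(filename) as f:
        src = f.read()

    # Each extracted block becomes its own file checker; run_checks() returns the
    # (name, errors, statistics) tuple that result_py*/result_1.py stores
    # (with the error list sorted) in test_checker().
    for block in find_sourcecode(filename, options, src):
        checker = RstFileChecker.from_sourcecode(
            filename=filename, checks=checks, options=options,
            style_guide=None, source_block=block,
        )
        print(checker.run_checks())

The summary fixtures, by contrast, capture what flake8_rst.application.Application writes with --show-source for the same files, as the ``summary`` fixture in tests/test_precisely.py shows.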