├── .coveragerc ├── .gitignore ├── .travis.yml ├── LICENSE ├── Makefile ├── README.rst ├── pep8ify ├── __init__.py ├── fixes │ ├── __init__.py │ ├── fix_blank_lines.py │ ├── fix_compound_statements.py │ ├── fix_extraneous_whitespace.py │ ├── fix_imports_on_separate_lines.py │ ├── fix_indentation.py │ ├── fix_maximum_line_length.py │ ├── fix_missing_newline.py │ ├── fix_missing_whitespace.py │ ├── fix_tabs.py │ ├── fix_trailing_blank_lines.py │ ├── fix_trailing_whitespace.py │ ├── fix_whitespace_around_operator.py │ ├── fix_whitespace_before_inline_comment.py │ ├── fix_whitespace_before_parameters.py │ └── utils.py └── pep8ify.py ├── requirements.txt ├── setup.py ├── tests ├── fixtures │ ├── __init__.py │ ├── blank_lines │ │ ├── __init__.py │ │ ├── blank_lines1_in.py │ │ └── blank_lines1_out.py │ ├── compound_statements │ │ ├── __init__.py │ │ ├── compound_statements1_in.py │ │ ├── compound_statements1_out.py │ │ ├── compound_statements2_in.py │ │ ├── compound_statements2_out.py │ │ ├── compound_statements3_in.py │ │ └── compound_statements3_out.py │ ├── extraneous_whitespace │ │ ├── __init__.py │ │ ├── extraneous_whitespace1_in.py │ │ └── extraneous_whitespace1_out.py │ ├── imports_on_separate_lines │ │ ├── __init__.py │ │ ├── imports_on_separate_lines1_in.py │ │ ├── imports_on_separate_lines1_out.py │ │ ├── imports_on_separate_lines2_in.py │ │ ├── imports_on_separate_lines2_out.py │ │ ├── imports_on_separate_lines3_in.py │ │ └── imports_on_separate_lines3_out.py │ ├── indentation │ │ ├── __init__.py │ │ ├── indentation1_in.py │ │ ├── indentation1_out.py │ │ ├── indentation2_in.py │ │ ├── indentation2_out.py │ │ ├── indentation3_in.py │ │ ├── indentation3_out.py │ │ ├── indentation4_in.py │ │ ├── indentation4_out.py │ │ ├── indentation5_in.py │ │ ├── indentation5_out.py │ │ ├── indentation6_in.py │ │ ├── indentation6_out.py │ │ ├── indentation7_in.py │ │ ├── indentation7_out.py │ │ ├── mixed_indents_in.py │ │ └── mixed_indents_out.py │ ├── maximum_line_length │ │ ├── __init__.py │ │ ├── maximum_line_length1_in.py │ │ ├── maximum_line_length1_out.py │ │ ├── maximum_line_length2_in.py │ │ ├── maximum_line_length2_out.py │ │ ├── maximum_line_length3_in.py │ │ ├── maximum_line_length3_out.py │ │ ├── maximum_line_length4_in.py │ │ ├── maximum_line_length4_out.py │ │ ├── maximum_line_length5_in.py │ │ └── maximum_line_length5_out.py │ ├── missing_newline │ │ ├── __init__.py │ │ ├── missing_newline1_in.py │ │ ├── missing_newline1_out.py │ │ ├── missing_newline2_in.py │ │ ├── missing_newline2_out.py │ │ ├── missing_newline3_in.py │ │ ├── missing_newline3_out.py │ │ ├── missing_newline4_in.py │ │ └── missing_newline4_out.py │ ├── missing_whitespace │ │ ├── __init__.py │ │ ├── missing_whitespace1_in.py │ │ ├── missing_whitespace1_out.py │ │ ├── missing_whitespace2_in.py │ │ └── missing_whitespace2_out.py │ ├── tabs │ │ ├── __init__.py │ │ ├── tab1_in.py │ │ ├── tab1_out.py │ │ ├── tabs2_in.py │ │ └── tabs2_out.py │ ├── trailing_blank_lines │ │ ├── __init__.py │ │ ├── trailing_blank_lines1_in.py │ │ ├── trailing_blank_lines1_out.py │ │ ├── trailing_blank_lines2_in.py │ │ ├── trailing_blank_lines2_out.py │ │ ├── trailing_blank_lines3_in.py │ │ ├── trailing_blank_lines3_out.py │ │ ├── trailing_blank_lines4_in.py │ │ └── trailing_blank_lines4_out.py │ ├── trailing_whitespace │ │ ├── __init__.py │ │ ├── trailing_whitespace1_in.py │ │ ├── trailing_whitespace1_out.py │ │ ├── trailing_whitespace2_in.py │ │ └── trailing_whitespace2_out.py │ ├── whitespace_around_operator │ │ ├── __init__.py │ │ ├── 
whitespace_around_operator1_in.py │ │ └── whitespace_around_operator1_out.py │ ├── whitespace_before_inline_comment │ │ ├── __init__.py │ │ ├── whitespace_before_inline_comment1_in.py │ │ ├── whitespace_before_inline_comment1_out.py │ │ ├── whitespace_before_inline_comment2_in.py │ │ ├── whitespace_before_inline_comment2_out.py │ │ ├── whitespace_before_inline_comment3_in.py │ │ └── whitespace_before_inline_comment3_out.py │ └── whitespace_before_parameters │ │ ├── __init__.py │ │ ├── whitespace_before_parameters1_in.py │ │ └── whitespace_before_parameters1_out.py └── test_all_fixes.py └── tox.ini /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | include = pep8ify/* 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Python build artefacts 2 | /build/ 3 | /dist/ 4 | /_venv/ 5 | *.py[cod] 6 | *.egg 7 | *.egg-info 8 | *.egg-link 9 | 10 | .coverage 11 | *.tox 12 | 13 | # editor artefacts 14 | *~ 15 | .#* 16 | \#*# 17 | .*.swp 18 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - 2.7 4 | - 3.2 5 | - 3.3 6 | script: make test 7 | install: 8 | - pip install -r requirements.txt 9 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 
40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2012 Steve Pulec 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 
-------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELL := /bin/bash 2 | 3 | init: 4 | python setup.py develop 5 | pip install -r requirements.txt 6 | 7 | test: 8 | nosetests --with-coverage ./tests/ 9 | 10 | tdaemon: 11 | tdaemon -t nose ./tests/ --custom-args="--with-growl" 12 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Pep8ify: Clean your code with ease 2 | ================================== 3 | 4 | 5 | .. image:: https://secure.travis-ci.org/spulec/pep8ify.png?branch=master 6 | 7 | Pep8ify is a library that modifies python source code to conform to 8 | pep8_. 9 | 10 | 11 | Installation 12 | ------------ 13 | 14 | This library currently works with python 2.7, 3.2, and 3.3. 15 | 16 | To install pep8ify, simply: :: 17 | 18 | $ pip install pep8ify 19 | 20 | 21 | Usage 22 | ------------ 23 | 24 | To print a diff of changes that pep8ify will make against a particular source 25 | file or directory: :: 26 | 27 | $ pep8ify source_folder 28 | 29 | To have those changes written to the files: :: 30 | 31 | $ pep8ify -w source_folder 32 | 33 | By default, this will create backup files for each file that will be changed. 34 | You can add the `-n` option to not create the backups. Please do not do this 35 | if you are not using a version control system. Although this code is 36 | well-tested, there are most likely bugs still. 37 | 38 | For more options about running particular fixers, read the 39 | `lib2to3 documentation`_. This 40 | library is built on top of that one. 41 | 42 | Fixes 43 | ------------ 44 | 45 | A list of the available fixers can be found with the following: :: 46 | 47 | $ pep8ify -l 48 | Available transformations for the -f/--fix option: 49 | blank_lines 50 | compound_statements 51 | extraneous_whitespace 52 | imports_on_separate_lines 53 | indentation 54 | maximum_line_length 55 | missing_newline 56 | missing_whitespace 57 | tabs 58 | trailing_blank_lines 59 | trailing_whitespace 60 | whitespace_around_operator 61 | whitespace_before_inline_comment 62 | whitespace_before_parameters 63 | 64 | All of these are set to run by default except for 'maximum_line_length'. 65 | To run all fixes including 'maximum_line_length', run: :: 66 | 67 | $ pep8ify -f all -f maximum_line_length example.py 68 | 69 | 70 | .. _`lib2to3 documentation`: http://docs.python.org/library/2to3.html 71 | .. _pep8: http://www.python.org/dev/peps/pep-0008/ 72 | -------------------------------------------------------------------------------- /pep8ify/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | 4 | """ 5 | pep8ify 6 | ~~~~~~~~ 7 | 8 | :copyright: (c) 2012 by Steve Pulec. 
9 | 10 | """ 11 | 12 | __title__ = 'pep8ify' 13 | __version__ = '0.0.13' 14 | __author__ = 'Steve Pulec' 15 | __license__ = 'Apache License 2.0' 16 | __copyright__ = 'Copyright 2014 Steve Pulec' 17 | -------------------------------------------------------------------------------- /pep8ify/fixes/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/pep8ify/fixes/__init__.py -------------------------------------------------------------------------------- /pep8ify/fixes/fix_blank_lines.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.fixer_base import BaseFix 3 | from .utils import find_indentation 4 | from lib2to3.pgen2 import token 5 | from lib2to3.pygram import python_symbols as symbols 6 | 7 | from .utils import (get_whitespace_before_definition, has_parent, 8 | tuplize_comments) 9 | 10 | 11 | class FixBlankLines(BaseFix): 12 | ''' 13 | Separate top-level function and class definitions with two blank lines. 14 | 15 | Method definitions inside a class are separated by a single blank line. 16 | 17 | Extra blank lines may be used (sparingly) to separate groups of related 18 | functions. Blank lines may be omitted between a bunch of related 19 | one-liners (e.g. a set of dummy implementations). 20 | 21 | Use blank lines in functions, sparingly, to indicate logical sections. 22 | ''' 23 | 24 | def match(self, node): 25 | # Get classes, non-decorateds funcs, decorators, and simple statements. 26 | # Ignore decorateds funcs since they will be taken care of with the 27 | # decorator. 28 | if (node.type == symbols.funcdef and node.parent.type != symbols. 29 | decorated or node.type == symbols.classdef and node.parent.type != 30 | symbols.decorated or node.type == symbols.decorated or node.type 31 | == symbols.simple_stmt): 32 | return True 33 | return False 34 | 35 | def transform(self, node, results): 36 | # Sometimes newlines are in prefix of current node, sometimes they're 37 | # in prefix of the prev sibling 38 | if node.prefix.count('\n'): 39 | newline_node = node 40 | else: 41 | newline_node = get_whitespace_before_definition(node) 42 | if not newline_node: 43 | # No previous node, must be the first node. 44 | return 45 | 46 | if newline_node.type in [token.INDENT, token.NEWLINE]: 47 | # If the newline_node is an indent or newline, we don't need to 48 | # worry about fixing indentation since it is not part of the 49 | # prefix. Dedents do have it as part of the prefix. 50 | curr_node_indentation = '' 51 | else: 52 | curr_node_indentation = find_indentation(node) 53 | min_lines_between_defs, max_lines_between_defs = (self. 54 | get_newline_limits(node)) 55 | new_prefix = self.trim_comments(curr_node_indentation, newline_node. 56 | prefix, min_lines_between_defs, max_lines_between_defs) 57 | 58 | if newline_node.prefix != new_prefix: 59 | newline_node.prefix = new_prefix 60 | newline_node.changed() 61 | 62 | def get_newline_limits(self, node): 63 | if node.type == symbols.simple_stmt or has_parent(node, symbols. 64 | simple_stmt): 65 | max_lines_between_defs = 1 66 | min_lines_between_defs = 0 67 | elif has_parent(node, symbols.classdef) or has_parent(node, symbols. 
68 | funcdef): 69 | # If we're inside a definition, only use a single space 70 | max_lines_between_defs = 1 71 | min_lines_between_defs = 1 72 | else: 73 | # Top-level definition 74 | max_lines_between_defs = 2 75 | min_lines_between_defs = 2 76 | return (min_lines_between_defs, max_lines_between_defs) 77 | 78 | def trim_comments(self, curr_node_indentation, previous_whitespace, 79 | min_lines_between_defs, max_lines_between_defs): 80 | before_comments, comments, after_comments = tuplize_comments( 81 | previous_whitespace) 82 | 83 | if before_comments.count("\n") > max_lines_between_defs: 84 | before_comments = '\n' * max_lines_between_defs 85 | if after_comments.count("\n") > max_lines_between_defs: 86 | after_comments = '\n' * max_lines_between_defs 87 | 88 | if (before_comments.count("\n") + after_comments.count("\n") > 89 | max_lines_between_defs): 90 | if before_comments and after_comments: 91 | # If there are spaces before and after, trim them down on both 92 | # sides to either 1 before and 1 after or 0 before and 1 after. 93 | before_comments = ('\n' * (min_lines_between_defs - 1) if 94 | min_lines_between_defs else '') 95 | after_comments = '\n' 96 | 97 | comment_lines = before_comments.count("\n") + after_comments.count( 98 | "\n") 99 | if comment_lines < min_lines_between_defs: 100 | before_comments += (min_lines_between_defs - comment_lines) * '\n' 101 | result = '%s%s%s' % (before_comments, comments, after_comments) 102 | 103 | # Make sure that the result indenation matches the original indentation 104 | if result.split('\n')[-1] != curr_node_indentation: 105 | result = "%s%s" % (result.rstrip(' '), curr_node_indentation) 106 | return result 107 | -------------------------------------------------------------------------------- /pep8ify/fixes/fix_compound_statements.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.fixer_base import BaseFix 3 | from .utils import find_indentation 4 | from lib2to3.pgen2 import token 5 | from lib2to3.pygram import python_symbols as symbols 6 | from lib2to3.pytree import Node, Leaf 7 | 8 | 9 | NL = Leaf(token.NEWLINE, '\n') 10 | 11 | class FixCompoundStatements(BaseFix): 12 | """ 13 | Compound statements (multiple statements on the same line) are 14 | generally discouraged. 15 | 16 | While sometimes it's okay to put an if/for/while with a small body 17 | on the same line, never do this for multi-clause statements. Also 18 | avoid folding such long lines! 19 | """ 20 | 21 | def match(self, node): 22 | results = {} 23 | if (node.prev_sibling and isinstance(node.prev_sibling, Leaf) and node. 
24 | prev_sibling.type == token.COLON and node.type != symbols.suite): 25 | # If it's inside a lambda definition, subscript, or sliceop, leave 26 | # it alone 27 | # symbols.trailer 28 | if node.parent.type in [symbols.lambdef, symbols.subscript, 29 | symbols.sliceop, symbols.dictsetmaker, symbols.trailer]: 30 | pass 31 | else: 32 | results["colon"] = True 33 | if (node.type == symbols.simple_stmt and Leaf(token.SEMI, ';') in node 34 | .children): 35 | results["semi"] = True 36 | return results 37 | 38 | def transform(self, node, results): 39 | if results.get("colon"): 40 | node = self.transform_colon(node) 41 | if results.get("semi"): 42 | node = self.transform_semi(node) 43 | 44 | def transform_colon(self, node): 45 | node_copy = node.clone() 46 | # Strip any whitespace that could have been there 47 | node_copy.prefix = node_copy.prefix.lstrip() 48 | old_depth = find_indentation(node) 49 | new_indent = '%s%s' % ((' ' * 4), old_depth) 50 | new_node = Node(symbols.suite, [Leaf(token.NEWLINE, '\n'), Leaf(token 51 | .INDENT, new_indent), node_copy, Leaf(token.DEDENT, '')]) 52 | node.replace(new_node) 53 | node.changed() 54 | 55 | # Replace node with new_node in case semi 56 | return node_copy 57 | 58 | def transform_semi(self, node): 59 | for child in node.children: 60 | if child.type == token.SEMI: 61 | next_sibling = child.next_sibling 62 | # If the next sibling is a NL, this is a trailing semicolon; 63 | # simply remove it and the NL's prefix 64 | if next_sibling == NL: 65 | child.remove() 66 | continue 67 | 68 | # Strip any whitespace from the next sibling 69 | prefix = next_sibling.prefix 70 | stripped_prefix = prefix.lstrip() 71 | if prefix != stripped_prefix: 72 | next_sibling.prefix = stripped_prefix 73 | next_sibling.changed() 74 | # Replace the semi with a newline 75 | old_depth = find_indentation(child) 76 | 77 | child.replace([Leaf(token.NEWLINE, '\n'), 78 | Leaf(token.INDENT, old_depth)]) 79 | child.changed() 80 | return node 81 | -------------------------------------------------------------------------------- /pep8ify/fixes/fix_extraneous_whitespace.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.fixer_base import BaseFix 3 | from lib2to3.pgen2 import token 4 | 5 | from .utils import node_text 6 | 7 | LSTRIP_TOKENS = [token.LPAR, token.LSQB, token.LBRACE] 8 | RSTRIP_TOKENS = [token.RPAR, token.RSQB, token.COLON, token.COMMA, token.SEMI, 9 | token.RBRACE] 10 | STRIP_TOKENS = RSTRIP_TOKENS + LSTRIP_TOKENS 11 | 12 | 13 | class FixExtraneousWhitespace(BaseFix): 14 | ''' 15 | Avoid extraneous whitespace in the following situations: 16 | 17 | - Immediately inside parentheses, brackets or braces. 18 | 19 | - Immediately before a comma, semicolon, or colon. 20 | ''' 21 | 22 | def match(self, node): 23 | if node.type in STRIP_TOKENS: 24 | return True 25 | return False 26 | 27 | def transform(self, node, results): 28 | if node.type in LSTRIP_TOKENS and node.get_suffix(): 29 | new_prefix = node.next_sibling.prefix.lstrip(' \t') 30 | if node.next_sibling.prefix != new_prefix: 31 | node.next_sibling.prefix = new_prefix 32 | node.next_sibling.changed() 33 | elif node.type in RSTRIP_TOKENS and not node.prefix.count('\n'): 34 | # If the prefix has a newline, this node is the beginning 35 | # of a newline, no need to do anything. 
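# Illustrative example (editorial, not part of the upstream source): taken
# together, the two branches of transform() rewrite `spam( ham[ 1 ] , eggs )`
# as `spam(ham[1], eggs)` -- the LSTRIP branch above drops the padding after
# `(` and `[`, while this RSTRIP branch drops the padding before `]`, `,`
# and `)` (a single space is kept only when the previous element itself
# ends in a comma, as in `(a, )`).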
36 | new_prefix = node.prefix.rstrip(' \t') 37 | if node.prev_sibling: 38 | prev_sibling_text = node_text(node.prev_sibling) 39 | # If the previous sibling ended in a comma, we don't want to 40 | # remove this space 41 | if prev_sibling_text[-1] == ',': 42 | new_prefix = "%s " % new_prefix 43 | if node.prefix != new_prefix: 44 | node.prefix = new_prefix 45 | node.changed() 46 | -------------------------------------------------------------------------------- /pep8ify/fixes/fix_imports_on_separate_lines.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.fixer_base import BaseFix 3 | from .utils import find_indentation 4 | from lib2to3.pgen2 import token 5 | from lib2to3.pytree import Node, Leaf 6 | from lib2to3.pygram import python_symbols as symbols 7 | 8 | 9 | class FixImportsOnSeparateLines(BaseFix): 10 | ''' 11 | Imports should usually be on separate lines. 12 | ''' 13 | 14 | def match(self, node): 15 | if (node.type == symbols.simple_stmt and 16 | node.children[0].type == symbols.import_name and 17 | node.children[0].children[1].type == symbols.dotted_as_names): 18 | return node.children[0].children[1].children 19 | return False 20 | 21 | def transform(self, node, results): 22 | child_imports = [leaf.value for leaf in results if leaf.type == token. 23 | NAME] 24 | current_indentation = find_indentation(node) 25 | new_nodes = [] 26 | for index, module_name in enumerate(child_imports): 27 | new_prefix = current_indentation 28 | if not index: 29 | # Keep the prefix, if this is the first import name 30 | new_prefix = node.prefix 31 | new_nodes.append(Node(symbols.simple_stmt, [Node(symbols. 32 | import_name, [Leaf(token.NAME, 'import', prefix=new_prefix), 33 | Leaf(token.NAME, module_name, prefix=" ")]), Leaf(token. 34 | NEWLINE, '\n')])) 35 | 36 | node.replace(new_nodes) 37 | node.changed() 38 | -------------------------------------------------------------------------------- /pep8ify/fixes/fix_indentation.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.fixer_base import BaseFix 3 | from lib2to3.pytree import Leaf 4 | from lib2to3.pgen2 import token 5 | 6 | import re 7 | 8 | from .utils import prefix_indent_count, IS_26, add_leaves_method, NUM_SPACES, SPACES 9 | 10 | 11 | class FixIndentation(BaseFix): 12 | """ 13 | Use 4 spaces per indentation level. 14 | 15 | For really old code that you don't want to mess up, you can continue to 16 | use 8-space tabs. 
17 | """ 18 | 19 | def __init__(self, options, log): 20 | self.indents = [] 21 | self.indent_level = 0 22 | self.line_num = 0 23 | self.current_line_dedent = None 24 | # This is the indent of the previous line before it was modified 25 | self.prev_line_indent = 0 26 | 27 | super(FixIndentation, self).__init__(options, log) 28 | 29 | def match(self, node): 30 | if isinstance(node, Leaf): 31 | return True 32 | return False 33 | 34 | def transform(self, node, results): 35 | if node.type == token.INDENT: 36 | self.current_line_dedent = None 37 | self.transform_indent(node) 38 | elif node.type == token.DEDENT: 39 | self.transform_outdent(node) 40 | elif self.line_num != node.lineno: 41 | self.current_line_dedent = None 42 | self.transform_newline(node) 43 | 44 | def transform_indent(self, node): 45 | if IS_26: 46 | node = add_leaves_method(node) 47 | self.line_num = node.lineno 48 | # Indent spacing is stored in the value, node the prefix 49 | self.prev_line_indent = len(node.value.replace('\t', SPACES)) 50 | self.indents.append(self.prev_line_indent) 51 | self.indent_level += 1 52 | 53 | new_value = SPACES * self.indent_level 54 | new_prefix = '\n'.join(self.align_preceding_comment(node)).rstrip(' ') 55 | 56 | if node.value != new_value or node.prefix != new_prefix: 57 | node.value = new_value 58 | node.prefix = new_prefix 59 | node.changed() 60 | 61 | def transform_outdent(self, node): 62 | if self.line_num == node.lineno: 63 | # If a line dedents more then one level (so it's a 64 | # multi-level dedent), there are several DEDENT nodes. 65 | # These have the same lineno, but only the very first one 66 | # has a prefix, the others must not. 67 | is_consecutive_indent = True 68 | assert not node.prefix # must be empty 69 | assert (self.current_line_dedent is None or 70 | self.current_line_dedent.lineno == node.lineno) 71 | else: 72 | is_consecutive_indent = False 73 | self.current_line_dedent = node 74 | assert node.prefix or node.column == 0 # must not be empty 75 | 76 | self.line_num = node.lineno 77 | self.prev_line_indent = prefix_indent_count(node) 78 | 79 | # outdent, remove highest indent 80 | self.indent_level -= 1 81 | # if the last node was a dedent, too, modify that node's prefix 82 | # and remember that node 83 | self.fix_indent_prefix(self.current_line_dedent, 84 | not is_consecutive_indent) 85 | # pop indents *after* prefix/comment has been reindented, 86 | # as the last indent-level may be needed there. 87 | self.indents.pop() 88 | 89 | 90 | def transform_newline(self, node): 91 | self.line_num = node.lineno 92 | if self.indent_level: 93 | # Don't reindent continuing lines that are already indented 94 | # past where they need to be. 
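# Rough illustration (editorial, not part of the upstream source): for a body
# indented with two spaces or a tab, fix_indent_prefix() below rewrites the
# line's leading whitespace to SPACES * indent_level (four spaces per nesting
# level, per the class docstring), while a continuation line already indented
# deeper than prev_line_indent fails the `<=` check and is left untouched.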
95 | current_indent = prefix_indent_count(node) 96 | if current_indent <= self.prev_line_indent: 97 | self.fix_indent_prefix(node) 98 | else: 99 | # First line, no need to do anything 100 | pass 101 | 102 | def align_preceding_comment(self, node): 103 | prefix = node.prefix 104 | # Strip any previous empty lines since they shouldn't change 105 | # the comment indent 106 | comment_indent = re.sub(r'^([\s\t]*\n)?', '', prefix).find("#") 107 | if comment_indent > -1: 108 | # Determine if we should align the comment with the line before or 109 | # after 110 | # Default: indent to current level 111 | new_comment_indent = SPACES * self.indent_level 112 | 113 | if (node.type == token.INDENT and 114 | comment_indent < next(node.next_sibling.leaves()).column): 115 | # The comment is not aligned with the next indent, so 116 | # it should be aligned with the previous indent. 117 | new_comment_indent = SPACES * (self.indent_level - 1) 118 | elif node.type == token.DEDENT: 119 | # The comment is not aligned with the previous indent, so 120 | # it should be aligned with the next indent. 121 | try: 122 | level = self.indents.index(comment_indent) + 1 123 | new_comment_indent = level * SPACES 124 | except ValueError: 125 | new_comment_indent = comment_indent * ' ' 126 | # indent of comment does not match an indent level 127 | if comment_indent < self.indents[0]: 128 | # not even at indent level 1, leave unchanged 129 | new_comment_indent = comment_indent * ' ' 130 | else: 131 | i = max(i for i in self.indents if i < comment_indent) 132 | level = self.indents.index(i) + 1 133 | new_comment_indent = (level * SPACES 134 | + (comment_indent-i) * ' ') 135 | 136 | # Split the lines of comment and prepend them with the new indent 137 | # value 138 | return [(new_comment_indent + line.lstrip()) if line else '' 139 | for line in prefix.split('\n')] 140 | else: 141 | return prefix.split('\n') 142 | 143 | 144 | def fix_indent_prefix(self, node, align_comments=True): 145 | if node.prefix: 146 | 147 | if align_comments: 148 | prefix_lines = self.align_preceding_comment(node)[:-1] 149 | else: 150 | prefix_lines = node.prefix.split('\n')[:-1] 151 | prefix_lines.append(SPACES * self.indent_level) 152 | new_prefix = '\n'.join(prefix_lines) 153 | if node.prefix != new_prefix: 154 | node.prefix = new_prefix 155 | node.changed() 156 | -------------------------------------------------------------------------------- /pep8ify/fixes/fix_maximum_line_length.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.fixer_base import BaseFix 3 | from lib2to3.fixer_util import LParen, RParen 4 | from lib2to3.pgen2 import token 5 | from lib2to3.pygram import python_symbols as symbols 6 | from lib2to3.pytree import Leaf, Node 7 | from textwrap import TextWrapper 8 | 9 | from .utils import (tuplize_comments, get_quotes, wrap_leaves, 10 | first_child_leaf, find_indentation, IS_26, add_leaves_method) 11 | 12 | MAX_CHARS = 79 13 | OPENING_TOKENS = [token.LPAR, token.LSQB, token.LBRACE] 14 | CLOSING_TOKENS = [token.RPAR, token.RSQB, token.RBRACE] 15 | SYMBOLS_WITH_NEWLINES_IN_COLONS = [symbols.funcdef, symbols.classdef, 16 | symbols.if_stmt, symbols.for_stmt, symbols.while_stmt, symbols.lambdef, 17 | symbols.try_stmt, symbols.with_stmt] 18 | 19 | 20 | class FixMaximumLineLength(BaseFix): 21 | ''' 22 | Limit all lines to a maximum of 79 characters. 
23 | 24 | There are still many devices around that are limited to 80 character 25 | lines; plus, limiting windows to 80 characters makes it possible to have 26 | several windows side-by-side. The default wrapping on such devices looks 27 | ugly. Therefore, please limit all lines to a maximum of 79 characters. 28 | For flowing long blocks of text (docstrings or comments), limiting the 29 | length to 72 characters is recommended. 30 | ''' 31 | 32 | explicit = True # The user must ask for this fixer 33 | 34 | def match(self, node): 35 | if (node.type in [token.NEWLINE] or node.type == token.COLON and node. 36 | parent.type in SYMBOLS_WITH_NEWLINES_IN_COLONS): 37 | # Sometimes the newline is wrapped into the next node, so we need 38 | # to check the colons also. 39 | if self.need_to_check_node(node): 40 | # For colon nodes, we need to add the len of the colon also 41 | return True 42 | if any(len(line) > MAX_CHARS for line in node.prefix.split('\n')): 43 | # There is a line in the prefix greater than MAX_CHARS 44 | return True 45 | return False 46 | 47 | def transform(self, node, results): 48 | if self.node_needs_splitting(node): 49 | node_to_split = node.prev_sibling 50 | if node_to_split.type == token.STRING: 51 | self.fix_docstring(node_to_split) 52 | else: 53 | if isinstance(node_to_split, Leaf): 54 | node_to_split = node_to_split.parent 55 | combined_prefix = self.fix_leaves(node_to_split) 56 | if combined_prefix: 57 | node.prefix = "%s\n%s" % (node.prefix, combined_prefix. 58 | rstrip()) 59 | if (any(len(line) > MAX_CHARS for line in node.prefix.split('\n')) or 60 | node.prefix.count("#") and node.column + len(node.prefix) > 61 | MAX_CHARS): 62 | # Need to fix the prefix 63 | self.fix_prefix(node) 64 | 65 | @staticmethod 66 | def need_to_check_node(node): 67 | # Returns if the node or it's docstring might need to be split 68 | if IS_26: 69 | node = add_leaves_method(node) 70 | if node.column > MAX_CHARS: 71 | return True 72 | if (node.type == token.COLON 73 | and node.column + len(node.value) > MAX_CHARS): 74 | return True 75 | if node.prev_sibling and any(child.column + len(child.value) 76 | > MAX_CHARS for child in node.prev_sibling.leaves()): 77 | return True 78 | 79 | @staticmethod 80 | def node_needs_splitting(node): 81 | if not node.prev_sibling: 82 | return False 83 | 84 | if IS_26: 85 | node = add_leaves_method(node) 86 | if node.type == token.NEWLINE: 87 | node_length = len(node.prefix) 88 | elif node.type == token.COLON: 89 | node_length = len(node.prefix) - len(node.value) 90 | if node.type in [token.NEWLINE, token.COLON]: 91 | if node.column - node_length > MAX_CHARS: 92 | return True 93 | 94 | for child in node.prev_sibling.leaves(): 95 | if child.type == token.STRING: 96 | lines = node.value.split('\n') 97 | if child.column + len(lines.pop(0)) > MAX_CHARS: 98 | return True 99 | elif any(len(line) > MAX_CHARS for line in lines): 100 | return True 101 | elif child.column + len(child.value) > MAX_CHARS: 102 | return True 103 | 104 | def fix_prefix(self, node): 105 | before_comments, comments, after_comments = tuplize_comments(node. 
106 | prefix) 107 | 108 | # Combine all comment lines together 109 | all_comments = ' '.join([line.replace('#', '', 1).lstrip() for line 110 | in comments.split('\n')]) 111 | 112 | # It's an inline comment if it has not newlines 113 | is_inline_comment = not node.prefix.count('\n') 114 | 115 | initial_indent_level = comments.find('#') 116 | if initial_indent_level == -1: 117 | split_lines = [''] 118 | else: 119 | if is_inline_comment and node.prev_sibling: 120 | # If inline comment, find where the prev sibling started to 121 | # know how to indent lines 122 | initial_indent_level = (first_child_leaf(node.prev_sibling). 123 | column) 124 | indent = '%s# ' % (' ' * initial_indent_level) 125 | 126 | wrapper = TextWrapper(width=MAX_CHARS, initial_indent=indent, 127 | subsequent_indent=indent) 128 | split_lines = wrapper.wrap(all_comments) 129 | 130 | if is_inline_comment: 131 | # If inline comment is too long, we'll move it to the next line 132 | split_lines[0] = "\n%s" % split_lines[0] 133 | else: 134 | # We need to add back a newline that was lost above 135 | after_comments = "\n%s" % after_comments 136 | new_prefix = '%s%s%s' % (before_comments, '\n'.join(split_lines), 137 | after_comments.lstrip(' ')) 138 | # Append the trailing spaces back 139 | if node.prefix != new_prefix: 140 | node.prefix = new_prefix 141 | node.changed() 142 | 143 | def fix_docstring(self, node_to_split): 144 | # docstrings 145 | quote_start, quote_end = get_quotes(node_to_split.value) 146 | max_length = MAX_CHARS - node_to_split.column 147 | 148 | triple_quoted = quote_start.count('"""') or quote_start.count("'''") 149 | comment_indent = ' ' * (4 + node_to_split.column) 150 | 151 | if not triple_quoted: 152 | # If it's not tripled-quoted, we need to start and end each line 153 | # with quotes 154 | comment_indent = '%s%s' % (comment_indent, quote_start) 155 | # Since we will be appending the end_quote after each line after 156 | # the splitting 157 | max_length -= len(quote_end) 158 | # If it's not triple quoted, we need to paren it 159 | node_to_split.value = "(%s)" % node_to_split.value 160 | 161 | wrapper = TextWrapper(width=max_length, 162 | subsequent_indent=comment_indent) 163 | split_lines = wrapper.wrap(node_to_split.value) 164 | 165 | if not triple_quoted: 166 | # If it's not triple quoted, we need to close each line except for 167 | # the last one 168 | new_split_lines = [] 169 | for index, line in enumerate(split_lines): 170 | if index != len(split_lines) - 1: 171 | new_split_lines.append("%s%s" % (line, quote_end)) 172 | else: 173 | new_split_lines.append(line) 174 | split_lines = new_split_lines 175 | 176 | new_nodes = [Leaf(token.STRING, split_lines.pop(0))] 177 | for line in split_lines: 178 | new_nodes.extend([Leaf(token.NEWLINE, '\n'), Leaf(token.STRING, 179 | line)]) 180 | 181 | node_to_split.replace(new_nodes) 182 | node_to_split.changed() 183 | 184 | def fix_leaves(self, node_to_split): 185 | if IS_26: 186 | node_to_split = add_leaves_method(node_to_split) 187 | parent_depth = find_indentation(node_to_split) 188 | new_indent = "%s%s" % (' ' * 4, parent_depth) 189 | # For now, just indent additional lines by 4 more spaces 190 | 191 | child_leaves = [] 192 | combined_prefix = "" 193 | prev_leaf = None 194 | for index, leaf in enumerate(node_to_split.leaves()): 195 | if index and leaf.prefix.count('#'): 196 | if not combined_prefix: 197 | combined_prefix = "%s#" % new_indent 198 | combined_prefix += leaf.prefix.split('#')[-1] 199 | 200 | # We want to strip all newlines so we can properly insert 
newlines 201 | # where they should be 202 | if leaf.type != token.NEWLINE: 203 | if leaf.prefix.count('\n') and index: 204 | # If the line contains a newline, we need to strip all 205 | # whitespace since there were leading indent spaces 206 | if (prev_leaf and prev_leaf.type in [token.DOT, token.LPAR] 207 | or leaf.type in [token.RPAR]): 208 | leaf.prefix = "" 209 | else: 210 | leaf.prefix = " " 211 | 212 | # Append any trailing inline comments to the combined 213 | # prefix 214 | child_leaves.append(leaf) 215 | prev_leaf = leaf 216 | 217 | # Like TextWrapper, but for nodes. We split on MAX_CHARS - 1 since we 218 | # may need to insert a leading parenth. It's not great, but it would be 219 | # hard to do properly. 220 | split_leaves = wrap_leaves(child_leaves, width=MAX_CHARS - 1, 221 | subsequent_indent=new_indent) 222 | new_node = Node(node_to_split.type, []) 223 | 224 | # We want to keep track of if we are breaking inside a parenth 225 | open_count = 0 226 | need_parens = False 227 | for line_index, curr_line_nodes in enumerate(split_leaves): 228 | for node_index, curr_line_node in enumerate(curr_line_nodes): 229 | if line_index and not node_index: 230 | # If first node in non-first line, reset prefix since there 231 | # may have been spaces previously 232 | curr_line_node.prefix = new_indent 233 | new_node.append_child(curr_line_node) 234 | if curr_line_node.type in OPENING_TOKENS: 235 | open_count += 1 236 | if curr_line_node.type in CLOSING_TOKENS: 237 | open_count -= 1 238 | 239 | if line_index != len(split_leaves) - 1: 240 | # Don't add newline at the end since it it part of the next 241 | # sibling 242 | new_node.append_child(Leaf(token.NEWLINE, '\n')) 243 | 244 | # Checks if we ended a line without being surrounded by parens 245 | if open_count <= 0: 246 | need_parens = True 247 | if need_parens: 248 | # Parenthesize the parent if we're not inside parenths, braces, 249 | # brackets, since we inserted newlines between leaves 250 | parenth_before_equals = Leaf(token.EQUAL, "=") in split_leaves[0] 251 | self.parenthesize_parent(new_node, parenth_before_equals) 252 | node_to_split.replace(new_node) 253 | 254 | return combined_prefix 255 | 256 | def parenthesize_parent(self, node_to_split, parenth_before_equals): 257 | if node_to_split.type == symbols.print_stmt: 258 | self.parenthesize_print_stmt(node_to_split) 259 | elif node_to_split.type == symbols.return_stmt: 260 | self.parenthesize_after_arg(node_to_split, "return") 261 | elif node_to_split.type == symbols.expr_stmt: 262 | if parenth_before_equals: 263 | self.parenthesize_after_arg(node_to_split, "=") 264 | else: 265 | self.parenthesize_expr_stmt(node_to_split) 266 | elif node_to_split.type == symbols.import_from: 267 | self.parenthesize_after_arg(node_to_split, "import") 268 | elif node_to_split.type in [symbols.power, symbols.atom]: 269 | self.parenthesize_call_stmt(node_to_split) 270 | elif node_to_split.type in [symbols.or_test, symbols.and_test, symbols 271 | .not_test, symbols.test, symbols.arith_expr, symbols.comparison]: 272 | self.parenthesize_test(node_to_split) 273 | elif node_to_split.type == symbols.parameters: 274 | # Paramteres are always parenthesized already 275 | pass 276 | 277 | def parenthesize_test(self, node_to_split): 278 | first_child = node_to_split.children[0] 279 | if first_child != LParen(): 280 | # node_to_split.children[0] is the "print" literal strip the 281 | # current 1st child, since we will be prepending an LParen 282 | if first_child.prefix != first_child.prefix.strip(): 283 | 
first_child.prefix = first_child.prefix.strip() 284 | first_child.changed() 285 | left_paren = LParen() 286 | left_paren.prefix = " " 287 | node_to_split.insert_child(0, left_paren) 288 | node_to_split.append_child(RParen()) 289 | node_to_split.changed() 290 | 291 | def parenthesize_print_stmt(self, node_to_split): 292 | # print "hello there" 293 | # return a, b 294 | second_child = node_to_split.children[1] 295 | if second_child != LParen(): 296 | # node_to_split.children[0] is the "print" literal strip the 297 | # current 1st child, since we will be prepending an LParen 298 | if second_child.prefix != second_child.prefix.strip(): 299 | second_child.prefix = second_child.prefix.strip() 300 | second_child.changed() 301 | node_to_split.insert_child(1, LParen()) 302 | node_to_split.append_child(RParen()) 303 | node_to_split.changed() 304 | 305 | def parenthesize_after_arg(self, node_to_split, value): 306 | # parenthesize the leaves after the first node with the value 307 | value_index = 0 308 | for index, child in enumerate(node_to_split.children): 309 | if child.value == value: 310 | value_index = index + 1 311 | break 312 | value_child = node_to_split.children[value_index] 313 | if value_child != LParen(): 314 | # strip the current 1st child, since we will be prepending an 315 | # LParen 316 | if value_child.prefix != value_child.prefix.strip(): 317 | value_child.prefix = value_child.prefix.strip() 318 | value_child.changed() 319 | # We set a space prefix since this is after the '=' 320 | left_paren = LParen() 321 | left_paren.prefix = " " 322 | node_to_split.insert_child(value_index, left_paren) 323 | node_to_split.append_child(RParen()) 324 | node_to_split.changed() 325 | 326 | def parenthesize_expr_stmt(self, node_to_split): 327 | # x = "foo" + bar 328 | if node_to_split.children[0] != LParen(): 329 | node_to_split.insert_child(0, LParen()) 330 | node_to_split.append_child(RParen()) 331 | node_to_split.changed() 332 | 333 | def parenthesize_call_stmt(self, node_to_split): 334 | # a.b().c() 335 | first_child = node_to_split.children[0] 336 | if first_child != LParen(): 337 | # Since this can be at the beginning of a line, we can't just 338 | # strip the prefix, we need to keep leading whitespace 339 | first_child.prefix = "%s(" % first_child.prefix 340 | first_child.changed() 341 | node_to_split.append_child(RParen()) 342 | node_to_split.changed() 343 | -------------------------------------------------------------------------------- /pep8ify/fixes/fix_missing_newline.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.fixer_base import BaseFix 3 | from lib2to3.pygram import python_symbols as symbols 4 | 5 | from .utils import get_leaves_after_last_newline 6 | 7 | 8 | class FixMissingNewline(BaseFix): 9 | ''' 10 | The last line should have a newline. 11 | 12 | This is somewhat tricky since the parse tree 13 | sometimes categorizes newlines as token.DEDENTs 14 | ''' 15 | 16 | def match(self, node): 17 | # We only want to work with the top-level input since this should only 18 | # run once. 
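# Editorial illustration (not part of the upstream source): because only the
# file_input root matches, this fixer runs once per parse tree. It inspects
# the leaves that follow the last NEWLINE token and, if none of their
# prefixes contain '\n', hands the first of them to transform(), which sets
# its prefix to a single newline -- e.g. a file whose final line is `x = 1`
# with no terminating newline gains one.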
19 | if node.type != symbols.file_input: 20 | return 21 | 22 | leaves_after_last_newline = get_leaves_after_last_newline(node) 23 | if not any(leaf.prefix.count('\n') 24 | for leaf in leaves_after_last_newline): 25 | # If none of those have a prefix containing a newline, 26 | # we need to add one 27 | return leaves_after_last_newline[0] 28 | 29 | def transform(self, node, leaf): 30 | if leaf.prefix != '\n': 31 | leaf.prefix = '\n' 32 | leaf.changed() 33 | -------------------------------------------------------------------------------- /pep8ify/fixes/fix_missing_whitespace.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.fixer_base import BaseFix 3 | from lib2to3.fixer_util import Newline 4 | from lib2to3.pgen2 import token 5 | from lib2to3.pygram import python_symbols as symbols 6 | 7 | 8 | class FixMissingWhitespace(BaseFix): 9 | ''' 10 | Each comma, semicolon or colon should be followed by whitespace. 11 | ''' 12 | 13 | def match(self, node): 14 | if (node.type in (token.COLON, token.COMMA, token.SEMI) and node. 15 | get_suffix() != " "): 16 | # If there is a newline after, no space 17 | if (node.get_suffix().find('\n') == 0 or 18 | (node.next_sibling and node.next_sibling.children and 19 | node.next_sibling.children[0] == Newline())): 20 | return False 21 | # If we are using slice notation, no space necessary 22 | if node.parent.type in [symbols.subscript, symbols.sliceop]: 23 | return False 24 | return True 25 | return False 26 | 27 | def transform(self, node, results): 28 | next_sibling = node.next_sibling 29 | if not next_sibling: 30 | next_sibling = node.parent.next_sibling 31 | if not next_sibling: 32 | return 33 | new_prefix = " %s" % next_sibling.prefix.lstrip(' \t') 34 | if next_sibling.prefix != new_prefix: 35 | next_sibling.prefix = new_prefix 36 | next_sibling.changed() 37 | -------------------------------------------------------------------------------- /pep8ify/fixes/fix_tabs.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.fixer_base import BaseFix 3 | from lib2to3.pytree import Leaf 4 | 5 | from .utils import SPACES 6 | 7 | class FixTabs(BaseFix): 8 | ''' 9 | For new projects, spaces-only are strongly recommended over tabs. Most 10 | editors have features that make this easy to do. 11 | ''' 12 | 13 | def match(self, node): 14 | if node.prefix.count('\t') or (isinstance(node, Leaf) 15 | and node.value.count('\t')): 16 | return True 17 | return False 18 | 19 | def transform(self, node, results): 20 | new_prefix = node.prefix.replace('\t', SPACES) 21 | new_value = node.value.replace('\t', SPACES) 22 | if node.prefix != new_prefix or node.value != new_value: 23 | node.prefix = new_prefix 24 | node.value = new_value 25 | node.changed() 26 | -------------------------------------------------------------------------------- /pep8ify/fixes/fix_trailing_blank_lines.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.fixer_base import BaseFix 3 | from lib2to3.pygram import python_symbols as symbols 4 | 5 | from .utils import get_leaves_after_last_newline 6 | 7 | 8 | class FixTrailingBlankLines(BaseFix): 9 | ''' 10 | Trailing blank lines are superfluous. 11 | ''' 12 | 13 | def match(self, node): 14 | # We only want to work with the top-level input since this should only 15 | # run once. 
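# Editorial illustration (not part of the upstream source): as above,
# matching only file_input makes this a one-shot pass. match() collects every
# leaf after the last NEWLINE whose prefix still contains a newline, and
# transform() rstrips those prefixes so that, for example, three blank lines
# at the end of a file collapse to a single terminating newline.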
16 | if node.type != symbols.file_input: 17 | return 18 | 19 | leaves_after_last_newline = get_leaves_after_last_newline(node) 20 | # Return any leaves with newlines. 21 | return [leaf for leaf in leaves_after_last_newline 22 | if leaf.prefix.count('\n')] 23 | 24 | def transform(self, node, results): 25 | for index, result in enumerate(results): 26 | if index: 27 | # We've already stripped one newline. Strip any remaining 28 | if result.prefix != result.prefix.rstrip(): 29 | result.prefix = result.prefix.rstrip() 30 | result.changed() 31 | else: 32 | # We haven't stripped any newlines yet. We need to strip all 33 | # whitespace, but leave a single newline. 34 | if result.prefix.strip(): 35 | # If there are existing comments, we need to add two 36 | # newlines in order to have a trailing newline. 37 | new_prefix = '%s\n\n' % result.prefix.rstrip() 38 | else: 39 | new_prefix = '%s\n' % result.prefix.rstrip() 40 | if result.prefix != new_prefix: 41 | result.prefix = new_prefix 42 | result.changed() 43 | -------------------------------------------------------------------------------- /pep8ify/fixes/fix_trailing_whitespace.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.fixer_base import BaseFix 3 | from lib2to3.pgen2 import token 4 | 5 | 6 | class FixTrailingWhitespace(BaseFix): 7 | ''' 8 | Trailing whitespace is superfluous. 9 | Except when it occurs as part of a blank line (i.e. the line is 10 | nothing but whitespace). According to Python docs[1] a line with only 11 | whitespace is considered a blank line, and is to be ignored. However, 12 | matching a blank line to its indentation level avoids mistakenly 13 | terminating a multi-line statement (e.g. class declaration) when 14 | pasting code into the standard Python interpreter. 15 | 16 | [1] http://docs.python.org/reference/lexical_analysis.html#blank-lines 17 | ''' 18 | 19 | def match(self, node): 20 | # Newlines can be from a newline token or inside a node prefix 21 | if node.type == token.NEWLINE or node.prefix.count('\n'): 22 | return True 23 | 24 | def transform(self, node, results): 25 | if node.prefix.count('#'): 26 | prefix_split = node.prefix.split('\n') 27 | # Rstrip every line except for the last one, since that is the 28 | # whitespace before this line 29 | new_prefix = '\n'.join([line.rstrip(' \t') for line in 30 | prefix_split[:-1]] + [prefix_split[-1]]) 31 | else: 32 | new_prefix = node.prefix.lstrip(' \t') 33 | if new_prefix[0:1] == '\\': 34 | # Insert a space before a backslash ending line 35 | new_prefix = " %s" % new_prefix 36 | if node.prefix != new_prefix: 37 | node.prefix = new_prefix 38 | node.changed() 39 | -------------------------------------------------------------------------------- /pep8ify/fixes/fix_whitespace_around_operator.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.fixer_base import BaseFix 3 | from lib2to3.pgen2 import token 4 | from lib2to3.pygram import python_symbols as symbols 5 | from lib2to3.pytree import Leaf 6 | 7 | from .utils import OPERATORS, UNARY_OPERATORS 8 | 9 | ARG_SYMBOLS = [symbols.arglist, symbols.varargslist, symbols.typedargslist] 10 | KEYWORKD_ARG_SYMBOLS = [symbols.argument, symbols.arglist, symbols. 
11 | typedargslist] 12 | 13 | 14 | class FixWhitespaceAroundOperator(BaseFix): 15 | ''' 16 | Avoid extraneous whitespace in the following situations: 17 | 18 | - More than one space around an assignment (or other) operator to 19 | align it with another. 20 | ''' 21 | 22 | def match(self, node): 23 | if isinstance(node, Leaf) and node.value in OPERATORS: 24 | return True 25 | return False 26 | 27 | def transform(self, node, results): 28 | # Allow unary operators: -123, -x, +1. 29 | if (node.value in UNARY_OPERATORS and node.parent.type == symbols. 30 | factor): 31 | self.rstrip(node) 32 | # Allow argument unpacking: foo(*args, **kwargs). 33 | elif(node.value in ['*', '**'] and node.parent.type in ARG_SYMBOLS 34 | and (not node.prev_sibling or node.prev_sibling.type == token. 35 | COMMA)): 36 | self.rstrip(node) 37 | # Allow keyword assignment: foobar(foo=bar) 38 | elif node.value == '=' and node.parent.type in KEYWORKD_ARG_SYMBOLS: 39 | self.no_spaces(node) 40 | # Finally check if the spacing actually needs fixing 41 | elif(node.prefix != " " or node.get_suffix() != " "): 42 | self.spaces(node) 43 | 44 | def rstrip(self, node): 45 | next_sibling = node.next_sibling 46 | next_sibling_new_prefix = next_sibling.prefix.lstrip(' \t') 47 | if next_sibling.prefix != next_sibling_new_prefix: 48 | next_sibling.prefix = next_sibling_new_prefix 49 | next_sibling.changed() 50 | 51 | def no_spaces(self, node): 52 | if node.prefix != "": 53 | node.prefix = "" 54 | node.changed() 55 | 56 | next_sibling = node.next_sibling 57 | next_sibling_new_prefix = next_sibling.prefix.lstrip(' \t') 58 | if next_sibling.prefix != next_sibling_new_prefix: 59 | next_sibling.prefix = next_sibling_new_prefix 60 | next_sibling.changed() 61 | 62 | def spaces(self, node): 63 | if not node.prefix.count('\n'): 64 | # If there are newlines in the prefix, this is a continued line, 65 | # don't strip anything 66 | new_prefix = " %s" % node.prefix.lstrip(' \t') 67 | if node.prefix != new_prefix: 68 | node.prefix = new_prefix 69 | node.changed() 70 | 71 | next_sibling = node.next_sibling 72 | if not next_sibling: 73 | return 74 | if next_sibling.prefix.count('\n'): 75 | next_sibling_new_prefix = next_sibling.prefix.lstrip(' \t') 76 | if next_sibling_new_prefix[0:1] == '\\': 77 | # Insert a space before a backslash ending line 78 | next_sibling_new_prefix = " %s" % next_sibling_new_prefix 79 | else: 80 | next_sibling_new_prefix = " %s" % next_sibling.prefix.lstrip( 81 | ' \t') 82 | if next_sibling.prefix != next_sibling_new_prefix: 83 | next_sibling.prefix = next_sibling_new_prefix 84 | next_sibling.changed() 85 | -------------------------------------------------------------------------------- /pep8ify/fixes/fix_whitespace_before_inline_comment.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.fixer_base import BaseFix 3 | from .utils import node_text 4 | 5 | 6 | def get_previous_node(node): 7 | """ 8 | Return the node before this node. 9 | """ 10 | if node.prev_sibling: 11 | return node.prev_sibling 12 | if node.parent: 13 | return get_previous_node(node.parent) 14 | 15 | 16 | class FixWhitespaceBeforeInlineComment(BaseFix): 17 | ''' 18 | Separate inline comments by at least two spaces. 19 | 20 | An inline comment is a comment on the same line as a statement. Inline 21 | comments should be separated by at least two spaces from the statement. 22 | They should start with a # and a single space. 
23 | ''' 24 | 25 | def match(self, node): 26 | # An inline comment must contain with a # 27 | if not node.prefix.count("#"): 28 | return False 29 | 30 | # If the node's prefix starts with a newline, then this is not an 31 | # inline comment because there is not code before this. 32 | if node.prefix.lstrip(" \t").startswith("\n"): 33 | return False 34 | 35 | # If the previous node ended in a newline, then this node is 36 | # starting the line so it is not an inline comment. 37 | prev_node = get_previous_node(node) 38 | if not prev_node: 39 | # If no previous node, this is not an inline comment. 40 | return False 41 | prev_node_text = node_text(prev_node) 42 | if prev_node_text.endswith('\n'): 43 | return False 44 | 45 | return True 46 | 47 | def transform(self, node, results): 48 | position = node.prefix.find("#") 49 | if position > 2: 50 | # Already more than two spaces before comment 51 | whitespace_before, comment_after = node.prefix.split("#", 1) 52 | new_prefix = "%s# %s" % (whitespace_before, comment_after.lstrip( 53 | )) 54 | else: 55 | new_prefix = " # %s" % node.prefix.replace("#", "", 1).lstrip() 56 | if node.prefix != new_prefix: 57 | node.prefix = new_prefix 58 | node.changed() 59 | -------------------------------------------------------------------------------- /pep8ify/fixes/fix_whitespace_before_parameters.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.fixer_base import BaseFix 3 | from lib2to3.pgen2 import token 4 | from lib2to3.pygram import python_symbols as symbols 5 | 6 | 7 | class FixWhitespaceBeforeParameters(BaseFix): 8 | ''' 9 | Avoid extraneous whitespace in the following situations: 10 | 11 | - Immediately before the open parenthesis that starts the argument 12 | list of a function call. 13 | 14 | - Immediately before the open parenthesis that starts an indexing or 15 | slicing. 
16 | ''' 17 | 18 | def match(self, node): 19 | if (node.type in (token.LPAR, token.LSQB) and node.parent.type == 20 | symbols.trailer): 21 | return True 22 | return False 23 | 24 | def transform(self, node, results): 25 | if node.prefix != "": 26 | node.prefix = "" 27 | node.changed() 28 | -------------------------------------------------------------------------------- /pep8ify/fixes/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from lib2to3.pgen2 import token 3 | from lib2to3.pygram import python_symbols as symbols 4 | from lib2to3.pytree import Leaf 5 | import types 6 | import sys 7 | 8 | IS_26 = False 9 | if sys.version_info[0] == 2 and sys.version_info[1] == 6: 10 | IS_26 = True 11 | 12 | BINARY_OPERATORS = frozenset(['**=', '*=', '+=', '-=', '!=', '<>', 13 | '%=', '^=', '&=', '|=', '==', '/=', '//=', '<=', '>=', '<<=', '>>=', 14 | '%', '^', '&', '|', '=', '/', '//', '<', '>', '<<']) 15 | UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-']) 16 | OPERATORS = BINARY_OPERATORS | UNARY_OPERATORS 17 | MAX_CHARS = 79 18 | 19 | NUM_SPACES = 4 20 | SPACES = ' ' * NUM_SPACES 21 | 22 | 23 | def add_leaves_method(node): 24 | def leaves(node): 25 | if isinstance(node, Leaf): 26 | yield node 27 | else: 28 | for child in node.children: 29 | for x in leaves(child): 30 | yield x 31 | 32 | node.leaves = types.MethodType(leaves, node) 33 | other_nodes = ('prev_sibling', 'next_sibling', 'parent') 34 | for node_str in other_nodes: 35 | n = getattr(node, node_str) 36 | if n: 37 | setattr(n, 'leaves', types.MethodType(leaves, n)) 38 | return node 39 | 40 | 41 | def find_indentation(node): 42 | try: 43 | from lib2to3.fixer_util import find_indentation 44 | return find_indentation(node) 45 | except ImportError: 46 | while node is not None: 47 | if node.type == symbols.suite and len(node.children) > 2: 48 | indent = node.children[1] 49 | if indent.type == token.INDENT: 50 | return indent.value 51 | node = node.parent 52 | return "" 53 | 54 | 55 | def get_leaves_after_last_newline(node): 56 | # Get all of the leaves after the last newline leaf 57 | if IS_26: 58 | node = add_leaves_method(node) 59 | all_leaves = [] 60 | last_newline_leaf_index = -1 61 | for index, leaf in enumerate(node.leaves()): 62 | all_leaves.append(leaf) 63 | if leaf.type == token.NEWLINE: 64 | last_newline_leaf_index = index 65 | return all_leaves[last_newline_leaf_index + 1:] 66 | 67 | 68 | def first_child_leaf(node): 69 | if isinstance(node, Leaf): 70 | return node 71 | elif node.children: 72 | return first_child_leaf(node.children[0]) 73 | else: 74 | return None 75 | 76 | 77 | def node_text(node): 78 | result = "" 79 | if isinstance(node, Leaf): 80 | result += node.value 81 | elif node.children: 82 | for child in node.children: 83 | result += node_text(child) 84 | return result 85 | 86 | 87 | def get_whitespace_before_definition(node): 88 | if node.prev_sibling: 89 | return get_last_child_with_whitespace(node.prev_sibling) 90 | 91 | 92 | def get_last_child_with_whitespace(node): 93 | if IS_26: 94 | node = add_leaves_method(node) 95 | leaves = [] 96 | for leaf in node.leaves(): 97 | leaves.append(leaf) 98 | reverse_leaves = reversed(leaves) 99 | for leaf in reverse_leaves: 100 | if '\n' in leaf.prefix or leaf.value == '\n': 101 | return leaf 102 | 103 | 104 | def has_parent(node, symbol_type): 105 | # Returns if node has a parent of type symbol_type 106 | if node.parent: 107 | return node.parent.type == symbol_type or has_parent(node.parent, 
108 | symbol_type) 109 | 110 | 111 | def prefix_indent_count(node): 112 | # Find the number of spaces preceding this line 113 | return len(node.prefix.split('\n')[-1].replace('\t', SPACES)) 114 | 115 | 116 | def node_length(*nodes): 117 | return sum(len(node.prefix.strip('\n\t')) + 118 | len(node.value.strip('\n\t')) for node in nodes) 119 | 120 | 121 | def tuplize_comments(prefix): 122 | # This tuplizes the newlines before and after the prefix 123 | # Given '\n\n\n # test comment\n \n' 124 | # returns (['\n\n\n'], [' # test comment\n'], [' \n']) 125 | 126 | if not prefix: 127 | return ('', '', '') 128 | 129 | # If there are no newlines, this was just a trailing comment. Leave it 130 | # alone. 131 | if not prefix.count('\n'): 132 | return ('', prefix, '') 133 | 134 | if prefix.count("#"): 135 | whitespace_before_first_comment = prefix[:prefix.index("#")] 136 | start_of_comment = whitespace_before_first_comment.rfind('\n') 137 | if prefix.count('\n') and not prefix.split('\n')[-1].strip(): 138 | # Add a single newline back if there was a newline in the ending 139 | # whitespace 140 | comments = "%s\n" % prefix[start_of_comment + 1:].rstrip() 141 | else: 142 | comments = prefix[start_of_comment + 1:].rstrip() 143 | else: 144 | if prefix.count('\n'): 145 | comments = prefix.rsplit('\n')[1] 146 | # If no comments, there are no comments except the trailing spaces 147 | # before the current line 148 | else: 149 | comments = prefix 150 | comments_start = prefix.index(comments) 151 | return prefix[:comments_start].strip(' '), comments, prefix[ 152 | comments_start + len(comments):] 153 | 154 | 155 | def get_quotes(text): 156 | # Returns the quote type start and end 157 | # Given u"ur'the string'" returns (u"ur'", u"'") 158 | 159 | if text[:2].lower() in ['br', 'ur']: 160 | leading_chars = 2 161 | elif text[:1].lower() in ['b', 'u', 'r']: 162 | leading_chars = 1 163 | else: 164 | leading_chars = 0 165 | 166 | if text[leading_chars:leading_chars + 3] in ['"""', "'''"]: 167 | # Triple-quoted string 168 | quote_start = text[:leading_chars + 3] 169 | else: 170 | # Single-quoted string 171 | quote_start = text[:leading_chars + 1] 172 | return (quote_start, quote_start[leading_chars:]) 173 | 174 | 175 | # Like TextWrapper, but for leaves 176 | def wrap_leaves(nodes, width=MAX_CHARS, initial_indent='', 177 | subsequent_indent=''): 178 | lines = [] 179 | 180 | # Fake the prefix of the first node to be the indent that it should be. 181 | # We'll set it back afterward. 182 | first_node_prefix = nodes[0].prefix 183 | nodes[0].prefix = ' ' * nodes[0].column 184 | 185 | nodes.reverse() 186 | while nodes: 187 | tracking_back = False 188 | curr_line = [] 189 | curr_len = 0 190 | 191 | # Figure out which static string will prefix this line. 192 | if lines: 193 | indent = subsequent_indent 194 | else: 195 | indent = initial_indent 196 | 197 | # Maximum width for this line. 198 | curr_width = width - len(indent) 199 | 200 | while nodes: 201 | last_node = nodes[-1] 202 | 203 | if lines and not curr_line: 204 | # Strip prefixes for subsequent lines 205 | last_node.prefix = '' 206 | 207 | curr_node_length = node_length(last_node) 208 | 209 | # Can at least squeeze this chunk onto the current line. 210 | if curr_len + curr_node_length <= curr_width: 211 | curr_line.append(nodes.pop()) 212 | curr_len += curr_node_length 213 | 214 | # Nope, this line is full. 
215 | else: 216 | # only disallow breaking on/after equals if parent of this type 217 | if nodes and nodes[-1].type in [token.COMMA, token.EQUAL]: 218 | # We don't want the next line to start on one of these 219 | # tokens 220 | tracking_back = True 221 | nodes.append(curr_line.pop()) 222 | if (curr_line and curr_line[-1].type == token.EQUAL and 223 | curr_line[-1].parent.type != symbols.expr_stmt): 224 | # We don't want this line to end on one of these tokens. 225 | # Move the last two nodes back onto the list 226 | tracking_back = True 227 | nodes.extend(reversed(curr_line[-2:])) 228 | del curr_line[-2:] 229 | break 230 | 231 | # The current line is full, and the next chunk is too big to fit on 232 | # *any* line (not just this one). 233 | if nodes: 234 | next_chunk_length = node_length(nodes[-1]) 235 | if tracking_back: 236 | next_chunk_length += node_length(nodes[-2]) 237 | if next_chunk_length > curr_width: 238 | curr_line.append(nodes.pop()) 239 | if nodes and nodes[-1].type in [token.COMMA, token.EQUAL]: 240 | # We don't want the next line to start on these chars, just 241 | # add them here Check maximum_line_length3_in:4 for an 242 | # example 243 | curr_line.append(nodes.pop()) 244 | elif (len(nodes) > 2 and not curr_line and 245 | node_length(*nodes[-3:]) > curr_width): 246 | # This scenario happens when we were not able to break on an 247 | # assignment statement above and the next line is still too 248 | # long. Remove the last 3 nodes and move them to curr_line 249 | curr_line.extend(reversed(nodes[-3:])) 250 | del nodes[-3:] 251 | if nodes and nodes[-1].type in [token.COMMA, token.EQUAL]: 252 | curr_len += node_length(nodes[-1]) 253 | curr_line.append(nodes.pop()) 254 | 255 | if curr_line: 256 | curr_line[0].prefix = "%s%s" % (indent, curr_line[0].prefix) 257 | lines.append(curr_line) 258 | else: 259 | assert False, ("There was an error parsing this line." 
260 | "Please report this to the package owner.") 261 | 262 | lines[0][0].prefix = first_node_prefix 263 | return lines 264 | -------------------------------------------------------------------------------- /pep8ify/pep8ify.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import lib2to3.main 4 | 5 | try: 6 | import pep8ify.fixes 7 | except ImportError: 8 | # if importing pep8ify fails, try to load from parent 9 | # directory to support running without installation 10 | import imp, os 11 | if not hasattr(os, 'getuid') or os.getuid() != 0: 12 | imp.load_module('pep8ify', *imp.find_module('pep8ify', 13 | [os.path.dirname(os.path.dirname(__file__))])) 14 | 15 | 16 | def _main(): 17 | raise SystemExit(lib2to3.main.main("pep8ify.fixes")) 18 | 19 | if __name__ == '__main__': 20 | _main() 21 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | coverage 2 | nose==1.1.2 3 | rudolf2 4 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from setuptools import setup 4 | import pep8ify 5 | 6 | setup( 7 | name="pep8ify", 8 | license='Apache License 2.0', 9 | version=pep8ify.__version__, 10 | description="Cleans your python code to conform to pep8", 11 | author="Steve Pulec", 12 | author_email="spulec@gmail.com", 13 | url="https://github.com/spulec/pep8ify", 14 | packages=["pep8ify", "pep8ify.fixes"], 15 | entry_points={ 16 | 'console_scripts': [ 17 | 'pep8ify = pep8ify.pep8ify:_main', 18 | ], 19 | }, 20 | classifiers=[ 21 | "Environment :: Console", 22 | "Intended Audience :: Developers", 23 | "Programming Language :: Python", 24 | "Topic :: Software Development", 25 | "Topic :: Utilities", 26 | ]) 27 | -------------------------------------------------------------------------------- /tests/fixtures/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/blank_lines/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/blank_lines/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/blank_lines/blank_lines1_in.py: -------------------------------------------------------------------------------- 1 | def a(): 2 | pass 3 | 4 | 5 | # asdfasdf 6 | def b(): 7 | pass 8 | 9 | 10 | @dec1 11 | @dec2 12 | def a(): 13 | pass 14 | 15 | 16 | # Foo 17 | # Bar 18 | 19 | 20 | def b(): 21 | pass 22 | 23 | 24 | class Foo: 25 | b = 0 26 | def bar(): 27 | pass 28 | 29 | 30 | 31 | 32 | def bar2(): 33 | pass 34 | 35 | @decoratedclass 36 | class Baz: 37 | def zorp(): 38 | pass 39 | 40 | def testing345(): 41 | pass 42 | 43 | def b(n): 44 | pass 45 | 46 | def a(): 47 | pass 48 | 49 | 50 | 51 | def b(n): 52 | pass 53 | def testing123(): 54 | 55 | 56 | 57 | pass 58 | @decorator 59 | def a(): 60 | print "testing 1" 61 | 62 | 63 | 64 | # test comment 65 | print "testing 2" 66 | 67 | print "testing 3" 68 | 69 | foo = 7 70 | 71 | 72 | bar 
= 2 73 | -------------------------------------------------------------------------------- /tests/fixtures/blank_lines/blank_lines1_out.py: -------------------------------------------------------------------------------- 1 | def a(): 2 | pass 3 | 4 | 5 | # asdfasdf 6 | def b(): 7 | pass 8 | 9 | 10 | @dec1 11 | @dec2 12 | def a(): 13 | pass 14 | 15 | # Foo 16 | # Bar 17 | 18 | def b(): 19 | pass 20 | 21 | 22 | class Foo: 23 | b = 0 24 | 25 | def bar(): 26 | pass 27 | 28 | def bar2(): 29 | pass 30 | 31 | 32 | @decoratedclass 33 | class Baz: 34 | def zorp(): 35 | pass 36 | 37 | 38 | def testing345(): 39 | pass 40 | 41 | 42 | def b(n): 43 | pass 44 | 45 | 46 | def a(): 47 | pass 48 | 49 | 50 | def b(n): 51 | pass 52 | 53 | 54 | def testing123(): 55 | 56 | pass 57 | 58 | 59 | @decorator 60 | def a(): 61 | print "testing 1" 62 | 63 | # test comment 64 | print "testing 2" 65 | 66 | print "testing 3" 67 | 68 | foo = 7 69 | 70 | bar = 2 71 | -------------------------------------------------------------------------------- /tests/fixtures/compound_statements/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/compound_statements/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/compound_statements/compound_statements1_in.py: -------------------------------------------------------------------------------- 1 | if foo == 'blah': 2 | do_blah_thing() 3 | do_one() 4 | do_two() 5 | do_three() 6 | 7 | lambda x: 2 * x 8 | 9 | if foo == 'blah': do_blah_thing() 10 | for x in lst: total += x 11 | while t < 10: t = delay() 12 | if foo == 'blah': do_blah_thing() 13 | else: do_non_blah_thing() 14 | try: something() 15 | finally: cleanup() 16 | 17 | 18 | def func(): 19 | if foo == 'blah': 20 | four() 21 | 22 | 23 | def func(): 24 | if foo == 'blah': four(); five() 25 | 26 | 27 | def func2(): print "testing" 28 | 29 | if foo == 'blah': one(); two(); three() 30 | 31 | do_one(); do_two(); do_three() 32 | 33 | if foo == 'blah': 34 | all_one(); all_two(); all_three() 35 | -------------------------------------------------------------------------------- /tests/fixtures/compound_statements/compound_statements1_out.py: -------------------------------------------------------------------------------- 1 | if foo == 'blah': 2 | do_blah_thing() 3 | do_one() 4 | do_two() 5 | do_three() 6 | 7 | lambda x: 2 * x 8 | 9 | if foo == 'blah': 10 | do_blah_thing() 11 | for x in lst: 12 | total += x 13 | while t < 10: 14 | t = delay() 15 | if foo == 'blah': 16 | do_blah_thing() 17 | else: 18 | do_non_blah_thing() 19 | try: 20 | something() 21 | finally: 22 | cleanup() 23 | 24 | 25 | def func(): 26 | if foo == 'blah': 27 | four() 28 | 29 | 30 | def func(): 31 | if foo == 'blah': 32 | four() 33 | five() 34 | 35 | 36 | def func2(): 37 | print "testing" 38 | 39 | if foo == 'blah': 40 | one() 41 | two() 42 | three() 43 | 44 | do_one() 45 | do_two() 46 | do_three() 47 | 48 | if foo == 'blah': 49 | all_one() 50 | all_two() 51 | all_three() 52 | -------------------------------------------------------------------------------- /tests/fixtures/compound_statements/compound_statements2_in.py: -------------------------------------------------------------------------------- 1 | def testing(): 2 | return range(10)[:] 3 | -------------------------------------------------------------------------------- 
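Each *_in.py fixture above is paired with a *_out.py holding the expected result (the matching compound_statements2_out.py follows directly below). The harness in tests/test_all_fixes.py presumably drives each pair through the corresponding fixer and compares the refactored source with the _out file; the following is a minimal sketch of that round trip using only lib2to3's public RefactoringTool API, with the fixer name and fixture paths spelled out purely for illustration.

from lib2to3.refactor import RefactoringTool


def check_fixture(fixer_name, in_path, out_path):
    # Load a single pep8ify fixer and run it over the _in fixture.
    tool = RefactoringTool([fixer_name])
    with open(in_path) as f:
        source = f.read()  # refactor_string() expects a trailing newline
    refactored = str(tool.refactor_string(source, in_path))
    with open(out_path) as f:
        expected = f.read()
    assert refactored == expected, "fixer output differs from the _out fixture"


check_fixture('pep8ify.fixes.fix_compound_statements',
    'tests/fixtures/compound_statements/compound_statements2_in.py',
    'tests/fixtures/compound_statements/compound_statements2_out.py')

The actual test module may iterate over the fixture directories rather than hard-coding paths like this.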
/tests/fixtures/compound_statements/compound_statements2_out.py: -------------------------------------------------------------------------------- 1 | def testing(): 2 | return range(10)[:] 3 | -------------------------------------------------------------------------------- /tests/fixtures/compound_statements/compound_statements3_in.py: -------------------------------------------------------------------------------- 1 | 2 | do_it() ; 3 | 4 | def x(): 5 | do_it() ; 6 | dont_do_it() 7 | 8 | def y(): 9 | do_it() ; 10 | # comment 11 | dont_do_it() 12 | -------------------------------------------------------------------------------- /tests/fixtures/compound_statements/compound_statements3_out.py: -------------------------------------------------------------------------------- 1 | 2 | do_it() 3 | 4 | 5 | def x(): 6 | do_it() 7 | dont_do_it() 8 | 9 | 10 | def y(): 11 | do_it() 12 | # comment 13 | dont_do_it() 14 | -------------------------------------------------------------------------------- /tests/fixtures/extraneous_whitespace/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/extraneous_whitespace/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/extraneous_whitespace/extraneous_whitespace1_in.py: -------------------------------------------------------------------------------- 1 | spam(ham[1], {eggs: 2}) 2 | spam( ham[1], {eggs: 2}) 3 | spam(ham[ 1], {eggs: 2}) 4 | spam(ham[1], { eggs: 2}) 5 | spam(ham[1], {eggs: 2} ) 6 | spam(ham[1 ], {eggs: 2}) 7 | spam(ham[1], {eggs: 2 }) 8 | 9 | if x == 4: 10 | print x, y 11 | x, y = y , x 12 | if x == 4 : 13 | print x, y 14 | x, y = y, x 15 | 16 | re_comments, comments, after_comments = spam( 17 | "testing") 18 | 19 | re_comments, comments, after_comments = spam( 20 | "testing") 21 | 22 | new_prefix = u"%s# %s" % ("whitespace_before", "comment_after".lstrip( 23 | )) 24 | -------------------------------------------------------------------------------- /tests/fixtures/extraneous_whitespace/extraneous_whitespace1_out.py: -------------------------------------------------------------------------------- 1 | spam(ham[1], {eggs: 2}) 2 | spam(ham[1], {eggs: 2}) 3 | spam(ham[1], {eggs: 2}) 4 | spam(ham[1], {eggs: 2}) 5 | spam(ham[1], {eggs: 2}) 6 | spam(ham[1], {eggs: 2}) 7 | spam(ham[1], {eggs: 2}) 8 | 9 | if x == 4: 10 | print x, y 11 | x, y = y, x 12 | if x == 4: 13 | print x, y 14 | x, y = y, x 15 | 16 | re_comments, comments, after_comments = spam( 17 | "testing") 18 | 19 | re_comments, comments, after_comments = spam( 20 | "testing") 21 | 22 | new_prefix = u"%s# %s" % ("whitespace_before", "comment_after".lstrip( 23 | )) 24 | -------------------------------------------------------------------------------- /tests/fixtures/imports_on_separate_lines/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/imports_on_separate_lines/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/imports_on_separate_lines/imports_on_separate_lines1_in.py: -------------------------------------------------------------------------------- 1 | import math, sys, os 2 | 3 | from subprocess import Popen, PIPE 4 | from myclas import MyClass 5 | from foo.bar.yourclass import YourClass 6 
| import myclass 7 | import foo.bar.yourclass 8 | 9 | 10 | class the_class(): 11 | import os 12 | import sys 13 | 14 | def test_func(): 15 | import math 16 | 17 | def other_func(): 18 | import os, sys, math 19 | -------------------------------------------------------------------------------- /tests/fixtures/imports_on_separate_lines/imports_on_separate_lines1_out.py: -------------------------------------------------------------------------------- 1 | import math 2 | import sys 3 | import os 4 | 5 | from subprocess import Popen, PIPE 6 | from myclas import MyClass 7 | from foo.bar.yourclass import YourClass 8 | import myclass 9 | import foo.bar.yourclass 10 | 11 | 12 | class the_class(): 13 | import os 14 | import sys 15 | 16 | def test_func(): 17 | import math 18 | 19 | def other_func(): 20 | import os 21 | import sys 22 | import math 23 | -------------------------------------------------------------------------------- /tests/fixtures/imports_on_separate_lines/imports_on_separate_lines2_in.py: -------------------------------------------------------------------------------- 1 | # some comment 2 | import math, sys 3 | 4 | # some other comment 5 | 6 | import this, that 7 | 8 | 9 | class the_class(): 10 | # some comment 11 | import os, sys 12 | -------------------------------------------------------------------------------- /tests/fixtures/imports_on_separate_lines/imports_on_separate_lines2_out.py: -------------------------------------------------------------------------------- 1 | # some comment 2 | import math 3 | import sys 4 | # some other comment 5 | 6 | import this 7 | import that 8 | 9 | 10 | class the_class(): 11 | # some comment 12 | import os 13 | import sys 14 | -------------------------------------------------------------------------------- /tests/fixtures/imports_on_separate_lines/imports_on_separate_lines3_in.py: -------------------------------------------------------------------------------- 1 | # some comment 2 | """ doc string """ 3 | import math, sys 4 | 5 | class the_class(): 6 | # some comment 7 | """ doc string """ 8 | import os, sys 9 | 10 | 11 | class second_class(): 12 | some_statement 13 | import os, sys 14 | -------------------------------------------------------------------------------- /tests/fixtures/imports_on_separate_lines/imports_on_separate_lines3_out.py: -------------------------------------------------------------------------------- 1 | # some comment 2 | """ doc string """ 3 | import math 4 | import sys 5 | 6 | 7 | class the_class(): 8 | # some comment 9 | """ doc string """ 10 | import os 11 | import sys 12 | 13 | 14 | class second_class(): 15 | some_statement 16 | import os 17 | import sys 18 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/indentation/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/indentation/indentation1_in.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 4 | def testing_func(): 5 | if (x == 5 or 6 | x == 7): 7 | pass 8 | 9 | # Comment A 10 | if not any(leaf.prefix.count(u'\n') 11 | for leaf in leaves_after_last_newline): 12 | pass 13 | # Comment B 14 | elif all(leaf.prefix.count(u'\t') 15 | for leaf in leaves_after_last_newline): 16 | pass 17 | 18 | 19 | def 
tester_method(): 20 | 21 | # Comment 1 22 | # Comment 3 23 | def inner_method(): 24 | pass 25 | 26 | def inner2(): 27 | # This is a two line 28 | # comment 29 | 30 | pass 31 | 32 | 33 | class tester_class(): 34 | u""" 35 | this is a docstring that 36 | needs to have its indentation fixed 37 | """ 38 | 39 | y = u"this is a string" 40 | 41 | def inner_class_method(): 42 | x = u"""this is a constant 43 | that spans over multiples lines""" 44 | pass 45 | 46 | def innter_class_method2(): 47 | # Comment 2 48 | pass 49 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/indentation1_out.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | 4 | def testing_func(): 5 | if (x == 5 or 6 | x == 7): 7 | pass 8 | 9 | # Comment A 10 | if not any(leaf.prefix.count(u'\n') 11 | for leaf in leaves_after_last_newline): 12 | pass 13 | # Comment B 14 | elif all(leaf.prefix.count(u'\t') 15 | for leaf in leaves_after_last_newline): 16 | pass 17 | 18 | 19 | def tester_method(): 20 | 21 | # Comment 1 22 | # Comment 3 23 | def inner_method(): 24 | pass 25 | 26 | def inner2(): 27 | # This is a two line 28 | # comment 29 | 30 | pass 31 | 32 | 33 | class tester_class(): 34 | u""" 35 | this is a docstring that 36 | needs to have its indentation fixed 37 | """ 38 | 39 | y = u"this is a string" 40 | 41 | def inner_class_method(): 42 | x = u"""this is a constant 43 | that spans over multiples lines""" 44 | pass 45 | 46 | def innter_class_method2(): 47 | # Comment 2 48 | pass 49 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/indentation2_in.py: -------------------------------------------------------------------------------- 1 | try: 2 | if one & two: 3 | print "both" 4 | except: 5 | print "failed" 6 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/indentation2_out.py: -------------------------------------------------------------------------------- 1 | try: 2 | if one & two: 3 | print "both" 4 | except: 5 | print "failed" 6 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/indentation3_in.py: -------------------------------------------------------------------------------- 1 | # 2 | # multi-level dedent with detent to level 1 3 | # 4 | if a: 5 | try: 6 | if one: 7 | if one & two: 8 | print "both" 9 | except: 10 | print "failed" 11 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/indentation3_out.py: -------------------------------------------------------------------------------- 1 | # 2 | # multi-level dedent with detent to level 1 3 | # 4 | if a: 5 | try: 6 | if one: 7 | if one & two: 8 | print "both" 9 | except: 10 | print "failed" 11 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/indentation4_in.py: -------------------------------------------------------------------------------- 1 | # 2 | 3 | 4 | class MyClass: 5 | 6 | # comment 7 | def my_func(self): 8 | if self.xxxx: 9 | self.xxxx() 10 | self.ping() 11 | 12 | def emptyline(self): 13 | return 14 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/indentation4_out.py: -------------------------------------------------------------------------------- 1 | # 2 | 3 | 4 | class MyClass: 5 | 6 | # comment 
7 | def my_func(self): 8 | if self.xxxx: 9 | self.xxxx() 10 | self.ping() 11 | 12 | def emptyline(self): 13 | return 14 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/indentation5_in.py: -------------------------------------------------------------------------------- 1 | 2 | is_android = True 3 | try: 4 | import shutil 5 | # this comment should be intended to `import` and `if` 6 | # this ono, too 7 | # this comment should be intended, too 8 | if xxxx + 1: 9 | if yyyyy * 2: 10 | if zzzz / 3: 11 | aaaaa + 4 12 | # this should stay at `yyyy` level 13 | elif kkkk - 5: 14 | if lll + 6: 15 | mmmm * 7 16 | # this should stay at `mmm * 7` level 17 | nnnn / 8 18 | elif kkkk + 9: 19 | if lll - 10: 20 | mmmm * 11 21 | # this should stay at `mmm * 11` level 22 | else: 23 | # this should stay at `bbbb` level 24 | bbbb / 12 25 | # this should go to `eeee` level 26 | eeee 27 | except ImportError: 28 | pass 29 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/indentation5_out.py: -------------------------------------------------------------------------------- 1 | 2 | is_android = True 3 | try: 4 | import shutil 5 | # this comment should be intended to `import` and `if` 6 | # this ono, too 7 | # this comment should be intended, too 8 | if xxxx + 1: 9 | if yyyyy * 2: 10 | if zzzz / 3: 11 | aaaaa + 4 12 | # this should stay at `yyyy` level 13 | elif kkkk - 5: 14 | if lll + 6: 15 | mmmm * 7 16 | # this should stay at `mmm * 7` level 17 | nnnn / 8 18 | elif kkkk + 9: 19 | if lll - 10: 20 | mmmm * 11 21 | # this should stay at `mmm * 11` level 22 | else: 23 | # this should stay at `bbbb` level 24 | bbbb / 12 25 | # this should go to `eeee` level 26 | eeee 27 | except ImportError: 28 | pass 29 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/indentation6_in.py: -------------------------------------------------------------------------------- 1 | try: 2 | import shutil 3 | if xxxx + 1: 4 | if yyyyy * 2: 5 | if zzzz / 3: 6 | aaaaa + 4 7 | # this should stay at `yyyy` level + one 8 | else: 9 | pass 10 | except: 11 | end_of_program 12 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/indentation6_out.py: -------------------------------------------------------------------------------- 1 | try: 2 | import shutil 3 | if xxxx + 1: 4 | if yyyyy * 2: 5 | if zzzz / 3: 6 | aaaaa + 4 7 | # this should stay at `yyyy` level + one 8 | else: 9 | pass 10 | except: 11 | end_of_program 12 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/indentation7_in.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | class X: 4 | # We get new broks from schedulers 5 | # REF: doc/broker-modules.png (2) 6 | def get_new_broks(self): 7 | # Get the good links tab for looping.. 8 | links = rechts 9 | 10 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/indentation7_out.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | class X: 4 | 5 | # We get new broks from schedulers 6 | # REF: doc/broker-modules.png (2) 7 | def get_new_broks(self): 8 | # Get the good links tab for looping.. 
9 | links = rechts 10 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/mixed_indents_in.py: -------------------------------------------------------------------------------- 1 | # 2 | # Test mixed intents 3 | # 4 | 5 | if True: 6 | if (x == 5 or x == 7): 7 | print "ping" 8 | 9 | if True: 10 | if (x == 5 or x == 7): 11 | print "ping" 12 | 13 | 14 | def testing_func(): 15 | if True: 16 | if (x == 5 or x == 7): 17 | print "ping" 18 | 19 | if True: 20 | if (x == 5 or x == 7): 21 | print "ping" 22 | 23 | 24 | def testing_func2(): 25 | if True: 26 | if (x == 5 or x == 7): 27 | print "ping" 28 | 29 | if True: 30 | if (x == 5 or x == 7): 31 | print "ping" 32 | -------------------------------------------------------------------------------- /tests/fixtures/indentation/mixed_indents_out.py: -------------------------------------------------------------------------------- 1 | # 2 | # Test mixed intents 3 | # 4 | 5 | if True: 6 | if (x == 5 or x == 7): 7 | print "ping" 8 | 9 | if True: 10 | if (x == 5 or x == 7): 11 | print "ping" 12 | 13 | 14 | def testing_func(): 15 | if True: 16 | if (x == 5 or x == 7): 17 | print "ping" 18 | 19 | if True: 20 | if (x == 5 or x == 7): 21 | print "ping" 22 | 23 | 24 | def testing_func2(): 25 | if True: 26 | if (x == 5 or x == 7): 27 | print "ping" 28 | 29 | if True: 30 | if (x == 5 or x == 7): 31 | print "ping" 32 | -------------------------------------------------------------------------------- /tests/fixtures/maximum_line_length/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/maximum_line_length/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/maximum_line_length/maximum_line_length1_in.py: -------------------------------------------------------------------------------- 1 | testing = tuplize_comments("this is a short string") # This is an inline comment that goes over 79 chars 2 | testing = tuplize_comments("this is a longer string that breaks the 79 char limit") # This is an inline comment that goes over 79 chars 3 | 4 | LSTRIP_TOKENS = ["foobar1", "foobar1", "foobar1", "foobar1", "foobar1", "foo23", "foobar1", "foobar1"] 5 | 6 | if ("foobar" == "foobar" or "foobar" == "foobar" or "foobar" == "foobar" or "foobar2" == "foobar" 7 | or "foobar" == "foobar" or "foobar" == "foobar" or "foobar" == "foobar" or "foobar3" == "foobar"): 8 | pass 9 | new_prefix = '\n'.join([u"%s%s" % ("new_comment_indent", line.lstrip()) if line else u'' for line in "new_prefix".split('\n')]).rstrip(u' ') + "another long string" 10 | from .utils import get_whitespace_before_definition, has_parent, tuplize_comments 11 | before_comments, comments, after_comments_and_this_string_goes_on = tuplize_comments(u"asjdfsjf js ffsadasdfsf") 12 | 13 | # Comment 1 14 | new_prefix = ('\n'.join([u"%s%s" % (new_comment_indent, line.lstrip()) if line else u'' for # A Comment 15 | line in new_prefix.split('\n')]).rstrip(u' ')) 16 | 17 | 18 | class tester: 19 | u"""this is testing the maximum length of a docstring and it is very long to ensure that the test will work well""" 20 | 21 | # This is a multiple line comment in front of a method that is defined inside of a class 22 | # and this is the second line 23 | def testering(self): 24 | print u"testering" 25 | 26 | # this is another testerig comment that makes sure that we are able to test the 
fixer properly. 27 | def tester2(): 28 | u'''This is a long docstring that is inside of a function which is inside of a class''' 29 | new_comment_indent = u'' 30 | new_prefix = u'' 31 | # Split the lines of comment and prepend them with the new indent value 32 | if True: 33 | new_prefix = '\n'.join([u"%s%s" % (new_comment_indent, line.lstrip()) if line else u'' for line in new_prefix.split('\n')]).rstrip(u' ') 34 | # Allow unary operators: -123, -x, +1. 35 | if node.value in UNARY_OPERATORS and node.parent.type == symbols.factor: 36 | pass 37 | comment_start = 2 38 | comments = u'' 39 | prefix = u'' 40 | return prefix[:comments_start].strip(u' '), comments, prefix[comments_start + len(comments):] 41 | 42 | 43 | # This is a tester comment that ensures we are able to fix top-level comments to not be too long. 44 | def tester6(): 45 | u'this is a single quoted docstring. I don\'t like them, but some people still use them' 46 | 47 | tester9 = u"all lines over 80 chars" 48 | # If someone uses string concat like this, I'm pretty sure the interpreter punches them in the face, but we should fix it anyway 49 | print "this is going to be" + "test that ensures that" + tester9 + "will be fixed appropriately" 50 | print "%s%s" % (tester9, "and another string that will make the total length go over 80s") 51 | 52 | the_fixering = "testing" 53 | that_other_thing_that_makes_this_over_eighty_chars_total = "testing2" 54 | testering = the_fixering + that_other_thing_that_makes_this_over_eighty_chars_total 55 | 56 | 57 | def tuplize_comments(prefix): 58 | prefix = "foo" 59 | if prefix.count("#"): 60 | pass 61 | else: 62 | if prefix.count(u'\n'): 63 | comments = prefix.rsplit(u'\n')[1] # If no comments, there are no comments except the trailing spaces before the current line 64 | else: 65 | comments = prefix 66 | comments_start = prefix.index(comments) 67 | 68 | testing = tuplize_comments("this one string" + "another string that makes this line too long") 69 | 70 | 71 | def tester4(): 72 | # This is a docstring that starts with a '#' and is greater than the max chars 73 | 74 | tester_object.test_a_really_long_method().chain_it_with_another_super_long_method_name() 75 | 76 | 77 | def tester5(): 78 | if (tester1 == tester2 and tester3 == tester4 and tester5 == tester6 and tester7 == tester8): 79 | print "good testing" 80 | 81 | 82 | def tester_func(param1=u'param_value1', param2=u'param_value2', param3=u'param_value3', param4=u'param_value4'): 83 | print "good testing" 84 | 85 | tester_func(param1=u'param_value1', param2=u'param_value2', param3=u'param_value3', param4=u'param_value4') 86 | 87 | 88 | def testing_func(): 89 | if (node.type == symbols.funcdef and node.parent.type != symbols.decorated 90 | or node.type == symbols.classdef or node.type == symbols.decorated or node.type == symbols.simple_stmt): 91 | return node.type, node.type2, node.type3, node.type4, node.type5, node.type6 92 | -------------------------------------------------------------------------------- /tests/fixtures/maximum_line_length/maximum_line_length1_out.py: -------------------------------------------------------------------------------- 1 | testing = tuplize_comments("this is a short string") 2 | # This is an inline comment that goes over 79 chars 3 | testing = tuplize_comments( 4 | "this is a longer string that breaks the 79 char limit") 5 | # This is an inline comment that goes over 79 chars 6 | 7 | LSTRIP_TOKENS = ["foobar1", "foobar1", "foobar1", "foobar1", "foobar1", 8 | "foo23", "foobar1", "foobar1"] 9 | 10 | if ("foobar" == 
"foobar" or "foobar" == "foobar" or "foobar" == "foobar" or 11 | "foobar2" == "foobar" or "foobar" == "foobar" or "foobar" == "foobar" or 12 | "foobar" == "foobar" or "foobar3" == "foobar"): 13 | pass 14 | new_prefix = ('\n'.join([u"%s%s" % ("new_comment_indent", line.lstrip()) if 15 | line else u'' for line in "new_prefix".split('\n')]).rstrip(u' ') + 16 | "another long string") 17 | from .utils import (get_whitespace_before_definition, has_parent, 18 | tuplize_comments) 19 | before_comments, comments, after_comments_and_this_string_goes_on = ( 20 | tuplize_comments(u"asjdfsjf js ffsadasdfsf")) 21 | 22 | # Comment 1 23 | new_prefix = ('\n'.join([u"%s%s" % (new_comment_indent, line.lstrip()) if line 24 | else u'' for line in new_prefix.split('\n')]).rstrip(u' ')) 25 | # A Comment 26 | 27 | 28 | class tester: 29 | u"""this is testing the maximum length of a docstring and it is very long 30 | to ensure that the test will work well""" 31 | 32 | # This is a multiple line comment in front of a method that is defined 33 | # inside of a class and this is the second line 34 | def testering(self): 35 | print u"testering" 36 | 37 | # this is another testerig comment that makes sure that we are able to test 38 | # the fixer properly. 39 | def tester2(): 40 | u'''This is a long docstring that is inside of a function which is 41 | inside of a class''' 42 | new_comment_indent = u'' 43 | new_prefix = u'' 44 | # Split the lines of comment and prepend them with the new indent value 45 | if True: 46 | new_prefix = ('\n'.join([u"%s%s" % (new_comment_indent, line.lstrip 47 | ()) if line else u'' for line in new_prefix.split('\n')]). 48 | rstrip(u' ')) 49 | # Allow unary operators: -123, -x, +1. 50 | if (node.value in UNARY_OPERATORS and node.parent.type == symbols. 51 | factor): 52 | pass 53 | comment_start = 2 54 | comments = u'' 55 | prefix = u'' 56 | return prefix[:comments_start].strip(u' '), comments, prefix[ 57 | comments_start + len(comments):] 58 | 59 | 60 | # This is a tester comment that ensures we are able to fix top-level comments 61 | # to not be too long. 62 | def tester6(): 63 | (u'this is a single quoted docstring. I don\'t like them, but some people' 64 | u'still use them') 65 | 66 | tester9 = u"all lines over 80 chars" 67 | # If someone uses string concat like this, I'm pretty sure the interpreter 68 | # punches them in the face, but we should fix it anyway 69 | print("this is going to be" + "test that ensures that" + tester9 + 70 | "will be fixed appropriately") 71 | print "%s%s" % (tester9, 72 | "and another string that will make the total length go over 80s") 73 | 74 | the_fixering = "testing" 75 | that_other_thing_that_makes_this_over_eighty_chars_total = "testing2" 76 | testering = (the_fixering + 77 | that_other_thing_that_makes_this_over_eighty_chars_total) 78 | 79 | 80 | def tuplize_comments(prefix): 81 | prefix = "foo" 82 | if prefix.count("#"): 83 | pass 84 | else: 85 | if prefix.count(u'\n'): 86 | comments = prefix.rsplit(u'\n')[1] 87 | # If no comments, there are no comments except the trailing spaces 88 | # before the current line 89 | else: 90 | comments = prefix 91 | comments_start = prefix.index(comments) 92 | 93 | testing = tuplize_comments("this one string" + 94 | "another string that makes this line too long") 95 | 96 | 97 | def tester4(): 98 | # This is a docstring that starts with a '#' and is greater than the max 99 | # chars 100 | 101 | (tester_object.test_a_really_long_method(). 
102 | chain_it_with_another_super_long_method_name()) 103 | 104 | 105 | def tester5(): 106 | if (tester1 == tester2 and tester3 == tester4 and tester5 == tester6 and 107 | tester7 == tester8): 108 | print "good testing" 109 | 110 | 111 | def tester_func(param1=u'param_value1', param2=u'param_value2', 112 | param3=u'param_value3', param4=u'param_value4'): 113 | print "good testing" 114 | 115 | tester_func(param1=u'param_value1', param2=u'param_value2', 116 | param3=u'param_value3', param4=u'param_value4') 117 | 118 | 119 | def testing_func(): 120 | if (node.type == symbols.funcdef and node.parent.type != symbols.decorated 121 | or node.type == symbols.classdef or node.type == symbols.decorated or 122 | node.type == symbols.simple_stmt): 123 | return (node.type, node.type2, node.type3, node.type4, node.type5, node 124 | .type6) 125 | -------------------------------------------------------------------------------- /tests/fixtures/maximum_line_length/maximum_line_length2_in.py: -------------------------------------------------------------------------------- 1 | class Command(LoadDataCommand): 2 | 3 | option_list = LoadDataCommand.option_list + ( 4 | make_option("-d", "--no-signals", dest="use_signals", default=True, 5 | help='Disconnects all signals during import', action="store_false"), 6 | ) 7 | -------------------------------------------------------------------------------- /tests/fixtures/maximum_line_length/maximum_line_length2_out.py: -------------------------------------------------------------------------------- 1 | class Command(LoadDataCommand): 2 | 3 | option_list = LoadDataCommand.option_list + (make_option("-d", 4 | "--no-signals", dest="use_signals", default=True, 5 | help='Disconnects all signals during import', action="store_false"),) 6 | -------------------------------------------------------------------------------- /tests/fixtures/maximum_line_length/maximum_line_length3_in.py: -------------------------------------------------------------------------------- 1 | def tester(): 2 | foo = 1 + 2 3 | if not foo: 4 | logger.error(u"This is a long logger message that goes over the max length: %s", foo) 5 | -------------------------------------------------------------------------------- /tests/fixtures/maximum_line_length/maximum_line_length3_out.py: -------------------------------------------------------------------------------- 1 | def tester(): 2 | foo = 1 + 2 3 | if not foo: 4 | logger.error( 5 | u"This is a long logger message that goes over the max length: %s", 6 | foo) 7 | -------------------------------------------------------------------------------- /tests/fixtures/maximum_line_length/maximum_line_length4_in.py: -------------------------------------------------------------------------------- 1 | class RequestForm(forms.ModelForm): 2 | company_url = forms.URLField(max_length=60, required=False, label="Company URL", widget=TextInput(attrs={'style': "width: %s;" % text_input_width}),) 3 | usage = forms.CharField(max_length=500, required=True, label="How are you planning to use this API? * \n(e.g. mobile app, local directory, etc)", widget=forms.Textarea(attrs={'class': 'forminput', 'style': "height: 100px"}),) 4 | category = models.ForeignKey('foo.bar', blank=False, null=True, help_text='You must select a category. 
If none is appropriate, select Other.') 5 | -------------------------------------------------------------------------------- /tests/fixtures/maximum_line_length/maximum_line_length4_out.py: -------------------------------------------------------------------------------- 1 | class RequestForm(forms.ModelForm): 2 | company_url = forms.URLField(max_length=60, required=False, 3 | label="Company URL", widget=TextInput(attrs={'style': "width: %s;" % 4 | text_input_width}), ) 5 | usage = forms.CharField(max_length=500, required=True, 6 | label="How are you planning to use this API? * \n(e.g. mobile app, local directory, etc)", 7 | widget=forms.Textarea(attrs={'class': 'forminput', 'style': 8 | "height: 100px"}), ) 9 | category = models.ForeignKey('foo.bar', blank=False, null=True, 10 | help_text='You must select a category. If none is appropriate, select Other.' 11 | ) 12 | -------------------------------------------------------------------------------- /tests/fixtures/maximum_line_length/maximum_line_length5_in.py: -------------------------------------------------------------------------------- 1 | foo = 'bar' 2 | 3 | for x in foo: 4 | print(x) 5 | -------------------------------------------------------------------------------- /tests/fixtures/maximum_line_length/maximum_line_length5_out.py: -------------------------------------------------------------------------------- 1 | foo = 'bar' 2 | 3 | for x in foo: 4 | print(x) 5 | -------------------------------------------------------------------------------- /tests/fixtures/missing_newline/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/missing_newline/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/missing_newline/missing_newline1_in.py: -------------------------------------------------------------------------------- 1 | class testing(): 2 | def tester(): 3 | pass -------------------------------------------------------------------------------- /tests/fixtures/missing_newline/missing_newline1_out.py: -------------------------------------------------------------------------------- 1 | class testing(): 2 | def tester(): 3 | pass 4 | -------------------------------------------------------------------------------- /tests/fixtures/missing_newline/missing_newline2_in.py: -------------------------------------------------------------------------------- 1 | 2 | from foo import bar 3 | 4 | a_smallish_int = 5 5 | -------------------------------------------------------------------------------- /tests/fixtures/missing_newline/missing_newline2_out.py: -------------------------------------------------------------------------------- 1 | 2 | from foo import bar 3 | 4 | a_smallish_int = 5 5 | -------------------------------------------------------------------------------- /tests/fixtures/missing_newline/missing_newline3_in.py: -------------------------------------------------------------------------------- 1 | 2 | from foo import bar 3 | 4 | a_smallish_int = 5 -------------------------------------------------------------------------------- /tests/fixtures/missing_newline/missing_newline3_out.py: -------------------------------------------------------------------------------- 1 | 2 | from foo import bar 3 | 4 | a_smallish_int = 5 5 | -------------------------------------------------------------------------------- 
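The missing_newline fixtures on either side of this point can also be reproduced from the command line: the pep8ify console script registered in setup.py just calls lib2to3.main.main("pep8ify.fixes") (see pep8ify/pep8ify.py above), so it accepts the usual 2to3-style options. Below is a sketch of the programmatic equivalent, under the assumption that lib2to3's standard flags (-l to list fixes, -f to pick one, -w to write changes back) behave here exactly as they do for 2to3, with example.py standing in for a real path.

import lib2to3.main

# List every fix discovered in pep8ify.fixes (same as running "pep8ify -l").
lib2to3.main.main("pep8ify.fixes", args=["-l"])

# Apply only the missing_newline fixer to a file and print the diff;
# adding "-w" would write the change back (with a .bak backup unless "-n").
lib2to3.main.main("pep8ify.fixes", args=["-f", "missing_newline", "example.py"])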
/tests/fixtures/missing_newline/missing_newline4_in.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import foo as bar 4 | 5 | 6 | class testing(): 7 | 8 | foobar = [] 9 | 10 | def tester(): 11 | pass 12 | 13 | 14 | def standalone_func(arg): 15 | def inner_func(): 16 | print "Some stuff in here" -------------------------------------------------------------------------------- /tests/fixtures/missing_newline/missing_newline4_out.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import foo as bar 4 | 5 | 6 | class testing(): 7 | 8 | foobar = [] 9 | 10 | def tester(): 11 | pass 12 | 13 | 14 | def standalone_func(arg): 15 | def inner_func(): 16 | print "Some stuff in here" 17 | -------------------------------------------------------------------------------- /tests/fixtures/missing_whitespace/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/missing_whitespace/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/missing_whitespace/missing_whitespace1_in.py: -------------------------------------------------------------------------------- 1 | BINARY_OPERATORS = frozenset(['**=', '*=', '+=', '-=', '!=', '<>', 2 | '%=', '^=', '&=', '|=']) 3 | 4 | a = range(10) 5 | b = range(5) 6 | foo = [a, b] 7 | bar = (3,) 8 | bar = (3, 1,) 9 | foo = [1,2,] 10 | foo = [1, 3] 11 | 12 | foobar = a[1:4] 13 | foobar = a[:4] 14 | foobar = a[1:] 15 | foobar = a[1:4:2] 16 | 17 | foobar = ['a','b'] 18 | foobar = foo(bar,baz) 19 | 20 | 21 | def tester_func(): 22 | if node_to_split.type in [symbols.or_test, symbols.and_test, symbols. 23 | not_test, symbols.test, symbols.arith_expr, symbols.comparison]: 24 | pass 25 | -------------------------------------------------------------------------------- /tests/fixtures/missing_whitespace/missing_whitespace1_out.py: -------------------------------------------------------------------------------- 1 | BINARY_OPERATORS = frozenset(['**=', '*=', '+=', '-=', '!=', '<>', 2 | '%=', '^=', '&=', '|=']) 3 | 4 | a = range(10) 5 | b = range(5) 6 | foo = [a, b] 7 | bar = (3, ) 8 | bar = (3, 1, ) 9 | foo = [1, 2, ] 10 | foo = [1, 3] 11 | 12 | foobar = a[1:4] 13 | foobar = a[:4] 14 | foobar = a[1:] 15 | foobar = a[1:4:2] 16 | 17 | foobar = ['a', 'b'] 18 | foobar = foo(bar, baz) 19 | 20 | 21 | def tester_func(): 22 | if node_to_split.type in [symbols.or_test, symbols.and_test, symbols. 
23 | not_test, symbols.test, symbols.arith_expr, symbols.comparison]: 24 | pass 25 | -------------------------------------------------------------------------------- /tests/fixtures/missing_whitespace/missing_whitespace2_in.py: -------------------------------------------------------------------------------- 1 | 2 | # This file will not be changed, ut pep8yif must not crash :-) 3 | 4 | def x(): 5 | return item, 6 | 7 | after = 1 8 | -------------------------------------------------------------------------------- /tests/fixtures/missing_whitespace/missing_whitespace2_out.py: -------------------------------------------------------------------------------- 1 | 2 | # This file will not be changed, ut pep8yif must not crash :-) 3 | 4 | def x(): 5 | return item, 6 | 7 | after = 1 8 | -------------------------------------------------------------------------------- /tests/fixtures/tabs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/tabs/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/tabs/tab1_in.py: -------------------------------------------------------------------------------- 1 | import foo 2 | 3 | 4 | class testing(): 5 | 6 | def tester(self): 7 | return self.blah 8 | 9 | def tester2(): 10 | print "bleh" 11 | -------------------------------------------------------------------------------- /tests/fixtures/tabs/tab1_out.py: -------------------------------------------------------------------------------- 1 | import foo 2 | 3 | 4 | class testing(): 5 | 6 | def tester(self): 7 | return self.blah 8 | 9 | def tester2(): 10 | print "bleh" 11 | -------------------------------------------------------------------------------- /tests/fixtures/tabs/tabs2_in.py: -------------------------------------------------------------------------------- 1 | try: 2 | if one and two: 3 | print "bleh" 4 | except: 5 | print "fail" 6 | -------------------------------------------------------------------------------- /tests/fixtures/tabs/tabs2_out.py: -------------------------------------------------------------------------------- 1 | try: 2 | if one and two: 3 | print "bleh" 4 | except: 5 | print "fail" 6 | -------------------------------------------------------------------------------- /tests/fixtures/trailing_blank_lines/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/trailing_blank_lines/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/trailing_blank_lines/trailing_blank_lines1_in.py: -------------------------------------------------------------------------------- 1 | class tester(): 2 | def func1(): 3 | return 4 | 5 | def func2(): 6 | return 7 | 8 | -------------------------------------------------------------------------------- /tests/fixtures/trailing_blank_lines/trailing_blank_lines1_out.py: -------------------------------------------------------------------------------- 1 | class tester(): 2 | def func1(): 3 | return 4 | 5 | def func2(): 6 | return 7 | -------------------------------------------------------------------------------- /tests/fixtures/trailing_blank_lines/trailing_blank_lines2_in.py: -------------------------------------------------------------------------------- 1 | class tester(): 2 | 
def func1(): 3 | return 4 | 5 | def func2(): 6 | return 7 | -------------------------------------------------------------------------------- /tests/fixtures/trailing_blank_lines/trailing_blank_lines2_out.py: -------------------------------------------------------------------------------- 1 | class tester(): 2 | def func1(): 3 | return 4 | 5 | def func2(): 6 | return 7 | -------------------------------------------------------------------------------- /tests/fixtures/trailing_blank_lines/trailing_blank_lines3_in.py: -------------------------------------------------------------------------------- 1 | class tester(): 2 | def func1(): 3 | return 4 | 5 | def func2(): 6 | return 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /tests/fixtures/trailing_blank_lines/trailing_blank_lines3_out.py: -------------------------------------------------------------------------------- 1 | class tester(): 2 | def func1(): 3 | return 4 | 5 | def func2(): 6 | return 7 | -------------------------------------------------------------------------------- /tests/fixtures/trailing_blank_lines/trailing_blank_lines4_in.py: -------------------------------------------------------------------------------- 1 | def a(): 2 | pass 3 | 4 | # This is commented 5 | # out 6 | -------------------------------------------------------------------------------- /tests/fixtures/trailing_blank_lines/trailing_blank_lines4_out.py: -------------------------------------------------------------------------------- 1 | def a(): 2 | pass 3 | 4 | # This is commented 5 | # out 6 | -------------------------------------------------------------------------------- /tests/fixtures/trailing_whitespace/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/trailing_whitespace/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/trailing_whitespace/trailing_whitespace1_in.py: -------------------------------------------------------------------------------- 1 | class tester(object): 2 | def __init__(self, attr1, attr2): 3 | self.attr1 = attr1 4 | self.attr2 = attr2 5 | self.attr3 = ["one string", 6 | "another string"] 7 | 8 | def __unicode__(self): 9 | return u"testing unicode response" 10 | 11 | def test_method(self): 12 | return self.attr1 + self.attr2 13 | 14 | def test_method2(self, suffix): 15 | return "%s %s" % (self.attr1, suffix) 16 | 17 | # This is a comment 18 | # This is another comment 19 | def test_method3(self): 20 | print("testing this %s", self.attr2) 21 | -------------------------------------------------------------------------------- /tests/fixtures/trailing_whitespace/trailing_whitespace1_out.py: -------------------------------------------------------------------------------- 1 | class tester(object): 2 | def __init__(self, attr1, attr2): 3 | self.attr1 = attr1 4 | self.attr2 = attr2 5 | self.attr3 = ["one string", 6 | "another string"] 7 | 8 | def __unicode__(self): 9 | return u"testing unicode response" 10 | 11 | def test_method(self): 12 | return self.attr1 + self.attr2 13 | 14 | def test_method2(self, suffix): 15 | return "%s %s" % (self.attr1, suffix) 16 | 17 | # This is a comment 18 | # This is another comment 19 | def test_method3(self): 20 | print("testing this %s", self.attr2) 21 | -------------------------------------------------------------------------------- 
/tests/fixtures/trailing_whitespace/trailing_whitespace2_in.py: -------------------------------------------------------------------------------- 1 | class testing(): 2 | def tester(): 3 | pass 4 | -------------------------------------------------------------------------------- /tests/fixtures/trailing_whitespace/trailing_whitespace2_out.py: -------------------------------------------------------------------------------- 1 | class testing(): 2 | def tester(): 3 | pass 4 | -------------------------------------------------------------------------------- /tests/fixtures/whitespace_around_operator/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/whitespace_around_operator/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/whitespace_around_operator/whitespace_around_operator1_in.py: -------------------------------------------------------------------------------- 1 | foo = 23 + 3 2 | foo = 4 + 5 3 | foo = 4 + 5 4 | foo = 4 + 5 5 | foo = 4 + 5 6 | 7 | i = i + 1 8 | submitted += 1 9 | x = x * 2 - 1 10 | hypot2 = x * x + y * y 11 | c = (a + b) * (a - b) 12 | foo(bar, key='word', *args, **kwargs) 13 | foo(bar, key = 'word', *args, **kwargs) 14 | 15 | x = (3 + 16 | 2) 17 | x = (3 18 | + 2) 19 | x = 3 +\ 20 | 2 21 | x = 3 + \ 22 | 2 23 | 24 | 25 | def func(foo, bar='tester'): 26 | return 5 27 | 28 | 29 | def func(foo, bar = 'tester'): 30 | return 5 31 | 32 | baz(**kwargs) 33 | negative = -1 34 | spam(-1) 35 | alpha[:-i] 36 | if not -5 < x < +5: 37 | pass 38 | lambda *args, **kw: (args, kw) 39 | lambda *args, ** kw: (args, kw) 40 | 41 | i=i+1 42 | submitted +=1 43 | x = x*2 - 1 44 | hypot2 = x*x + y*y 45 | c = (a+b) * (a-b) 46 | c = alpha -4 47 | z = x **y 48 | -------------------------------------------------------------------------------- /tests/fixtures/whitespace_around_operator/whitespace_around_operator1_out.py: -------------------------------------------------------------------------------- 1 | foo = 23 + 3 2 | foo = 4 + 5 3 | foo = 4 + 5 4 | foo = 4 + 5 5 | foo = 4 + 5 6 | 7 | i = i + 1 8 | submitted += 1 9 | x = x * 2 - 1 10 | hypot2 = x * x + y * y 11 | c = (a + b) * (a - b) 12 | foo(bar, key='word', *args, **kwargs) 13 | foo(bar, key='word', *args, **kwargs) 14 | 15 | x = (3 + 16 | 2) 17 | x = (3 18 | + 2) 19 | x = 3 + \ 20 | 2 21 | x = 3 + \ 22 | 2 23 | 24 | 25 | def func(foo, bar='tester'): 26 | return 5 27 | 28 | 29 | def func(foo, bar='tester'): 30 | return 5 31 | 32 | baz(**kwargs) 33 | negative = -1 34 | spam(-1) 35 | alpha[:-i] 36 | if not -5 < x < +5: 37 | pass 38 | lambda *args, **kw: (args, kw) 39 | lambda *args, **kw: (args, kw) 40 | 41 | i = i + 1 42 | submitted += 1 43 | x = x * 2 - 1 44 | hypot2 = x * x + y * y 45 | c = (a + b) * (a - b) 46 | c = alpha - 4 47 | z = x ** y 48 | -------------------------------------------------------------------------------- /tests/fixtures/whitespace_before_inline_comment/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/whitespace_before_inline_comment/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/whitespace_before_inline_comment/whitespace_before_inline_comment1_in.py: 
-------------------------------------------------------------------------------- 1 | x = x + 1 # Increment x 2 | x = x + 1 # Increment x 3 | x = x + 1 # Increment x 4 | x = x + 1 #Increment x 5 | x = x + 1 # Increment x 6 | x = x + 1 #Increment x 7 | 8 | some_list = ( 9 | foobar("asdf"), # some comment, 10 | foobar2(), 11 | ) 12 | -------------------------------------------------------------------------------- /tests/fixtures/whitespace_before_inline_comment/whitespace_before_inline_comment1_out.py: -------------------------------------------------------------------------------- 1 | x = x + 1 # Increment x 2 | x = x + 1 # Increment x 3 | x = x + 1 # Increment x 4 | x = x + 1 # Increment x 5 | x = x + 1 # Increment x 6 | x = x + 1 # Increment x 7 | 8 | some_list = ( 9 | foobar("asdf"), # some comment, 10 | foobar2(), 11 | ) 12 | -------------------------------------------------------------------------------- /tests/fixtures/whitespace_before_inline_comment/whitespace_before_inline_comment2_in.py: -------------------------------------------------------------------------------- 1 | import foo 2 | # a comment 3 | 4 | foo.bar() 5 | -------------------------------------------------------------------------------- /tests/fixtures/whitespace_before_inline_comment/whitespace_before_inline_comment2_out.py: -------------------------------------------------------------------------------- 1 | import foo 2 | # a comment 3 | 4 | foo.bar() 5 | -------------------------------------------------------------------------------- /tests/fixtures/whitespace_before_inline_comment/whitespace_before_inline_comment3_in.py: -------------------------------------------------------------------------------- 1 | # comment 1 2 | # comment 2 3 | import baz 4 | 5 | baz.bar( 6 | a='foobar', 7 | # Comment between args 8 | b='foobaz', 9 | ) 10 | -------------------------------------------------------------------------------- /tests/fixtures/whitespace_before_inline_comment/whitespace_before_inline_comment3_out.py: -------------------------------------------------------------------------------- 1 | # comment 1 2 | # comment 2 3 | import baz 4 | 5 | baz.bar( 6 | a='foobar', 7 | # Comment between args 8 | b='foobaz', 9 | ) 10 | -------------------------------------------------------------------------------- /tests/fixtures/whitespace_before_parameters/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/spulec/pep8ify/cf1815f7bad9882027289bdb2f77604b68962ca7/tests/fixtures/whitespace_before_parameters/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/whitespace_before_parameters/whitespace_before_parameters1_in.py: -------------------------------------------------------------------------------- 1 | foo = spam(1) 2 | bar = spam (1) 3 | 4 | dict['key'] = list[index] 5 | dict ['key'] = list[index] 6 | dict['key'] = list [index] 7 | 8 | foobar = ['key'] 9 | foobar(['key']) 10 | -------------------------------------------------------------------------------- /tests/fixtures/whitespace_before_parameters/whitespace_before_parameters1_out.py: -------------------------------------------------------------------------------- 1 | foo = spam(1) 2 | bar = spam(1) 3 | 4 | dict['key'] = list[index] 5 | dict['key'] = list[index] 6 | dict['key'] = list[index] 7 | 8 | foobar = ['key'] 9 | foobar(['key']) 10 | -------------------------------------------------------------------------------- /tests/test_all_fixes.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | from __future__ import unicode_literals 4 | 5 | from functools import partial 6 | import os 7 | from os.path import join 8 | import shutil 9 | from difflib import unified_diff 10 | 11 | from lib2to3.main import main 12 | 13 | FIXTURE_PATH = os.path.join(os.path.dirname(__file__), '') 14 | 15 | 16 | def setup(): 17 | pass 18 | 19 | 20 | def teardown(): 21 | # This finds all of the backup files that we created and replaces 22 | # the *_in.py files that were created for testing 23 | potential_backups = [] 24 | for root, dirs, files in os.walk(FIXTURE_PATH): 25 | for filename in files: 26 | potential_backups.append(join(root, filename)) 27 | 28 | real_backups = [potential_backup for potential_backup in potential_backups 29 | if potential_backup.endswith(".bak")] 30 | for backup in real_backups: 31 | shutil.move(backup, backup.replace(".bak", "")) 32 | 33 | 34 | def in_and_out_files_from_directory(directory): 35 | fixture_files = os.listdir(directory) 36 | fixture_in_files = [join(directory, fixture_file) 37 | for fixture_file in fixture_files if fixture_file.endswith("_in.py")] 38 | all_fixture_files = [(fixture_in, fixture_in.replace("_in.py", "_out.py")) 39 | for fixture_in in fixture_in_files] 40 | return all_fixture_files 41 | 42 | 43 | def test_all_fixtures(): 44 | for root, dirs, files in os.walk(FIXTURE_PATH): 45 | # Loop recursively through all files. If the file is in a 46 | # subdirectory, only run the fixer of the subdirectory name, else run 47 | # all fixers. 48 | for in_file, out_file in in_and_out_files_from_directory(root): 49 | fixer_to_run = None 50 | 51 | # This partial business is a hack to make the description 52 | # attribute actually work. 53 | # See http://code.google.com/p/python-nose/issues/detail?id=244#c1 54 | func = partial(check_fixture, in_file, out_file, fixer_to_run) 55 | func.description = "All fixes" 56 | if in_file.startswith(FIXTURE_PATH): 57 | func.description = in_file[len(FIXTURE_PATH):] 58 | yield (func,) 59 | 60 | 61 | test_all_fixtures.setup = setup 62 | test_all_fixtures.teardown = teardown 63 | 64 | 65 | def check_fixture(in_file, out_file, fixer): 66 | if fixer: 67 | main("pep8ify.fixes", args=['--no-diffs', '--fix', fixer, '-w', in_file]) 68 | else: 69 | main("pep8ify.fixes", args=['--no-diffs', '--fix', 'all', 70 | '--fix', 'maximum_line_length', '-w', in_file]) 71 | in_file_contents = open(in_file, 'r').readlines() 72 | out_file_contents = open(out_file, 'r').readlines() 73 | 74 | if in_file_contents != out_file_contents: 75 | text = "in_file doesn't match out_file\n" 76 | text += ''.join(unified_diff(out_file_contents, in_file_contents, 77 | 'expected', 'refactored result')) 78 | raise AssertionError(text) 79 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | # Tox (http://tox.testrun.org/) is a tool for running tests 2 | # in multiple virtualenvs. This configuration file will run the 3 | # test suite on all supported python versions. To use it, "pip install tox" 4 | # and then run "tox" from this directory. 5 | 6 | [tox] 7 | envlist = py27, py32, py33 8 | 9 | [testenv] 10 | commands = make test 11 | deps = 12 | nose==1.1.2 13 | coverage 14 | --------------------------------------------------------------------------------
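
The check_fixture() helper in tests/test_all_fixes.py also documents the whole surface needed to drive pep8ify programmatically: lib2to3's main() is handed the "pep8ify.fixes" package plus one or more fixer names. Below is a minimal sketch of that call pattern outside the test suite; the target path example_module.py is a hypothetical placeholder, and the .bak backup that appears next to the rewritten file (the same backups teardown() restores above) is lib2to3's default behaviour rather than anything pep8ify adds.

# Minimal sketch (not part of the repository) of invoking pep8ify's fixers
# directly through lib2to3, mirroring check_fixture() above.
from lib2to3.main import main

# Rewrite example_module.py in place (-w) using a single fixer; the fixer
# name "tabs" corresponds to pep8ify/fixes/fix_tabs.py. "example_module.py"
# is a hypothetical file used only for illustration.
main("pep8ify.fixes", args=['--no-diffs', '--fix', 'tabs', '-w', 'example_module.py'])

The test suite instead passes '--fix', 'all' together with '--fix', 'maximum_line_length', so the full fixer set runs against each *_in.py fixture before the result is compared with the matching *_out.py file.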