├── .gitignore ├── lib ├── jedi │ ├── evaluate │ │ ├── compiled │ │ │ ├── fake │ │ │ │ ├── posix.pym │ │ │ │ ├── datetime.pym │ │ │ │ ├── io.pym │ │ │ │ ├── _weakref.pym │ │ │ │ ├── _functools.pym │ │ │ │ ├── _sqlite3.pym │ │ │ │ ├── _sre.pym │ │ │ │ └── builtins.pym │ │ │ └── fake.py │ │ ├── cache.py │ │ ├── flow_analysis.py │ │ ├── dynamic.py │ │ ├── recursion.py │ │ ├── helpers.py │ │ ├── precedence.py │ │ ├── docstrings.py │ │ ├── stdlib.py │ │ ├── sys_path.py │ │ └── analysis.py │ ├── parser │ │ ├── pgen2 │ │ │ ├── __init__.py │ │ │ ├── grammar.py │ │ │ └── parse.py │ │ ├── token.py │ │ ├── grammar3.4.txt │ │ ├── grammar2.7.txt │ │ └── tokenize.py │ ├── api │ │ ├── replstartup.py │ │ ├── usages.py │ │ ├── helpers.py │ │ ├── keywords.py │ │ └── interpreter.py │ ├── __main__.py │ ├── __init__.py │ ├── debug.py │ ├── utils.py │ ├── common.py │ ├── _compatibility.py │ ├── settings.py │ ├── refactoring.py │ └── cache.py ├── tools.py └── python-tools.coffee ├── .travis.yml ├── styles └── python-tools.less ├── menus └── python-tools.cson ├── appveyor.yml ├── package.json ├── keymaps └── python-tools.cson ├── LICENSE.md ├── CHANGELOG.md ├── README.md └── spec └── python-tools-spec.coffee /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | npm-debug.log 3 | node_modules 4 | *.pyc 5 | *.orig 6 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/compiled/fake/posix.pym: -------------------------------------------------------------------------------- 1 | def getcwd(): 2 | return '' 3 | 4 | def getcwdu(): 5 | return '' 6 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/compiled/fake/datetime.pym: -------------------------------------------------------------------------------- 1 | class datetime(): 2 | @staticmethod 3 | def now(): 4 | return datetime() 5 | 
-------------------------------------------------------------------------------- /lib/jedi/evaluate/compiled/fake/io.pym: -------------------------------------------------------------------------------- 1 | class TextIOWrapper(): 2 | def __next__(self): 3 | return str() 4 | 5 | def __iter__(self): 6 | yield str() 7 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: objective-c 2 | 3 | notifications: 4 | email: 5 | on_success: never 6 | on_failure: change 7 | 8 | script: 'curl -s https://raw.githubusercontent.com/atom/ci/master/build-package.sh | sh' 9 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/compiled/fake/_weakref.pym: -------------------------------------------------------------------------------- 1 | def proxy(object, callback=None): 2 | return object 3 | 4 | class weakref(): 5 | def __init__(self, object, callback=None): 6 | self.__object = object 7 | def __call__(self): 8 | return self.__object 9 | -------------------------------------------------------------------------------- /styles/python-tools.less: -------------------------------------------------------------------------------- 1 | // The ui-variables file is provided by base themes provided by Atom. 2 | // 3 | // See https://github.com/atom/atom-dark-ui/blob/master/styles/ui-variables.less 4 | // for a full listing of what's available. 5 | @import "ui-variables"; 6 | 7 | .python-tools { 8 | } 9 | -------------------------------------------------------------------------------- /lib/jedi/parser/pgen2/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | # Modifications: 5 | # Copyright 2006 Google, Inc. All Rights Reserved. 
6 | # Licensed to PSF under a Contributor Agreement. 7 | # Copyright 2014 David Halter. Integration into Jedi. 8 | # Modifications are dual-licensed: MIT and PSF. 9 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/compiled/fake/_functools.pym: -------------------------------------------------------------------------------- 1 | class partial(): 2 | def __init__(self, func, *args, **keywords): 3 | self.__func = func 4 | self.__args = args 5 | self.__keywords = keywords 6 | 7 | def __call__(self, *args, **kwargs): 8 | # TODO should be **dict(self.__keywords, **kwargs) 9 | return self.__func(*(self.__args + args), **self.__keywords) 10 | -------------------------------------------------------------------------------- /menus/python-tools.cson: -------------------------------------------------------------------------------- 1 | 'context-menu': 2 | 'atom-text-editor': [ 3 | { 4 | 'label': 'Show Usages' 5 | 'command': 'python-tools:show-usages' 6 | }, 7 | { 8 | 'label': 'Goto Definition' 9 | 'command': 'python-tools:goto-definition' 10 | }, 11 | { 12 | 'label': 'Select all String' 13 | 'command': 'python-tools:select-all-string' 14 | } 15 | ] 16 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | version: "{build}" 2 | os: Windows Server 2012 R2 3 | 4 | test: off 5 | deploy: off 6 | 7 | init: 8 | - cmd: rd /s /q %CHOCOLATEYINSTALL% 9 | - ps: iex ((new-object net.webclient).DownloadString('https://chocolatey.org/install.ps1')) 10 | 11 | install: 12 | - cinst atom -y 13 | - cd %APPVEYOR_BUILD_FOLDER% 14 | - "%LOCALAPPDATA%/atom/bin/apm install" 15 | 16 | build_script: 17 | - cd %APPVEYOR_BUILD_FOLDER% 18 | - "%LOCALAPPDATA%/atom/bin/apm test --path %LOCALAPPDATA%/atom/bin/atom.cmd" 19 | -------------------------------------------------------------------------------- /package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "name": "python-tools", 3 | "main": "./lib/python-tools", 4 | "version": "0.6.9", 5 | "description": "Goto definition, show usages, refactor/rename and more for python files", 6 | "keywords": [ 7 | "python", 8 | "refactor", 9 | "jedi", 10 | "tools" 11 | ], 12 | "activationCommands": {}, 13 | "repository": "https://github.com/MichaelAquilina/python-tools", 14 | "license": "MIT", 15 | "engines": { 16 | "atom": ">=1.0.0 <2.0.0" 17 | }, 18 | "dependencies": {} 19 | } 20 | -------------------------------------------------------------------------------- /keymaps/python-tools.cson: -------------------------------------------------------------------------------- 1 | # Keybindings require three things to be fully defined: A selector that is 2 | # matched against the focused element, the keystroke and the command to 3 | # execute. 4 | # 5 | # Below is a basic keybinding which registers on all platforms by applying to 6 | # the root workspace element. 
7 | 8 | # For more detailed documentation see 9 | # https://atom.io/docs/latest/behind-atom-keymaps-in-depth 10 | 'atom-text-editor[data-grammar~="python"]:not([mini])': 11 | 'ctrl-alt-u': 'python-tools:show-usages' 12 | 'ctrl-alt-g': 'python-tools:goto-definition' 13 | 'ctrl-alt-e': 'python-tools:select-all-string' 14 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/compiled/fake/_sqlite3.pym: -------------------------------------------------------------------------------- 1 | def connect(database, timeout=None, isolation_level=None, detect_types=None, factory=None): 2 | return Connection() 3 | 4 | 5 | class Connection(): 6 | def cursor(self): 7 | return Cursor() 8 | 9 | 10 | class Cursor(): 11 | def cursor(self): 12 | return Cursor() 13 | 14 | def fetchone(self): 15 | return Row() 16 | 17 | def fetchmany(self, size=cursor.arraysize): 18 | return [self.fetchone()] 19 | 20 | def fetchall(self): 21 | return [self.fetchone()] 22 | 23 | 24 | class Row(): 25 | def keys(self): 26 | return [''] 27 | -------------------------------------------------------------------------------- /lib/jedi/api/replstartup.py: -------------------------------------------------------------------------------- 1 | """ 2 | To use Jedi completion in Python interpreter, add the following in your shell 3 | setup (e.g., ``.bashrc``):: 4 | 5 | export PYTHONSTARTUP="$(python -m jedi repl)" 6 | 7 | Then you will be able to use Jedi completer in your Python interpreter:: 8 | 9 | $ python 10 | Python 2.7.2+ (default, Jul 20 2012, 22:15:08) 11 | [GCC 4.6.1] on linux2 12 | Type "help", "copyright", "credits" or "license" for more information. 
13 | >>> import os 14 | >>> os.path.join().split().in # doctest: +SKIP 15 | os.path.join().split().index os.path.join().split().insert 16 | 17 | """ 18 | import jedi.utils 19 | from jedi import __version__ as __jedi_version__ 20 | 21 | print('REPL completion using Jedi %s' % __jedi_version__) 22 | jedi.utils.setup_readline() 23 | 24 | del jedi 25 | 26 | # Note: try not to do many things here, as it will contaminate global 27 | # namespace of the interpreter. 28 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright (c) 2015 Michael Aquilina 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining 4 | a copy of this software and associated documentation files (the 5 | "Software"), to deal in the Software without restriction, including 6 | without limitation the rights to use, copy, modify, merge, publish, 7 | distribute, sublicense, and/or sell copies of the Software, and to 8 | permit persons to whom the Software is furnished to do so, subject to 9 | the following conditions: 10 | 11 | The above copyright notice and this permission notice shall be 12 | included in all copies or substantial portions of the Software. 13 | 14 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 15 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 16 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 17 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 18 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 19 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 20 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
21 | -------------------------------------------------------------------------------- /lib/jedi/__main__.py: -------------------------------------------------------------------------------- 1 | from sys import argv 2 | from os.path import join, dirname, abspath, isdir 3 | 4 | 5 | if len(argv) == 2 and argv[1] == 'repl': 6 | # don't want to use __main__ only for repl yet, maybe we want to use it for 7 | # something else. So just use the keyword ``repl`` for now. 8 | print(join(dirname(abspath(__file__)), 'api', 'replstartup.py')) 9 | elif len(argv) > 1 and argv[1] == 'linter': 10 | """ 11 | This is a pre-alpha API. You're not supposed to use it at all, except for 12 | testing. It will very likely change. 13 | """ 14 | import jedi 15 | import sys 16 | 17 | if '--debug' in sys.argv: 18 | jedi.set_debug_function() 19 | 20 | for path in sys.argv[2:]: 21 | if path.startswith('--'): 22 | continue 23 | if isdir(path): 24 | import fnmatch 25 | import os 26 | 27 | paths = [] 28 | for root, dirnames, filenames in os.walk(path): 29 | for filename in fnmatch.filter(filenames, '*.py'): 30 | paths.append(os.path.join(root, filename)) 31 | else: 32 | paths = [path] 33 | 34 | try: 35 | for path in paths: 36 | for error in jedi.Script(path=path)._analysis(): 37 | print(error) 38 | except Exception: 39 | if '--pdb' in sys.argv: 40 | import pdb 41 | pdb.post_mortem() 42 | else: 43 | raise 44 | -------------------------------------------------------------------------------- /lib/jedi/parser/token.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from jedi._compatibility import is_py3 4 | from token import * 5 | 6 | 7 | COMMENT = N_TOKENS 8 | tok_name[COMMENT] = 'COMMENT' 9 | N_TOKENS += 1 10 | 11 | NL = N_TOKENS 12 | tok_name[NL] = 'NL' 13 | N_TOKENS += 1 14 | 15 | if is_py3: 16 | BACKQUOTE = N_TOKENS 17 | tok_name[BACKQUOTE] = 'BACKQUOTE' 18 | N_TOKENS += 1 19 | else: 20 | RARROW = N_TOKENS 21 | 
tok_name[RARROW] = 'RARROW' 22 | N_TOKENS += 1 23 | ELLIPSIS = N_TOKENS 24 | tok_name[ELLIPSIS] = 'ELLIPSIS' 25 | N_TOKENS += 1 26 | 27 | 28 | 29 | # Map from operator to number (since tokenize doesn't do this) 30 | 31 | opmap_raw = """\ 32 | ( LPAR 33 | ) RPAR 34 | [ LSQB 35 | ] RSQB 36 | : COLON 37 | , COMMA 38 | ; SEMI 39 | + PLUS 40 | - MINUS 41 | * STAR 42 | / SLASH 43 | | VBAR 44 | & AMPER 45 | < LESS 46 | > GREATER 47 | = EQUAL 48 | . DOT 49 | % PERCENT 50 | ` BACKQUOTE 51 | { LBRACE 52 | } RBRACE 53 | @ AT 54 | == EQEQUAL 55 | != NOTEQUAL 56 | <> NOTEQUAL 57 | <= LESSEQUAL 58 | >= GREATEREQUAL 59 | ~ TILDE 60 | ^ CIRCUMFLEX 61 | << LEFTSHIFT 62 | >> RIGHTSHIFT 63 | ** DOUBLESTAR 64 | += PLUSEQUAL 65 | -= MINEQUAL 66 | *= STAREQUAL 67 | /= SLASHEQUAL 68 | %= PERCENTEQUAL 69 | &= AMPEREQUAL 70 | |= VBAREQUAL 71 | ^= CIRCUMFLEXEQUAL 72 | <<= LEFTSHIFTEQUAL 73 | >>= RIGHTSHIFTEQUAL 74 | **= DOUBLESTAREQUAL 75 | // DOUBLESLASH 76 | //= DOUBLESLASHEQUAL 77 | -> RARROW 78 | ... ELLIPSIS 79 | """ 80 | 81 | opmap = {} 82 | for line in opmap_raw.splitlines(): 83 | op, name = line.split() 84 | opmap[op] = globals()[name] 85 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | 0.6.8 2 | ----- 3 | * Fix Issue #36 - python-tools unable to find python executable on Windows Machine 4 | * Mark known failing tests as skipped 5 | 6 | 0.6.7 7 | ----- 8 | * Better error handling and logging when something fails in the python handler 9 | 10 | 0.6.6 11 | ----- 12 | * Remove deprecated scrollToTop 13 | 14 | 0.6.5 15 | ----- 16 | * More informative error messages 17 | 18 | 0.6.4 19 | ----- 20 | * Add the following to the default list of windows paths to look up for python 21 | * C:\\Python27 22 | * C:\\Python34 23 | * C:\\Python35 24 | * Fix a nasty bug where the number of tabs open from goto definitions would grow for each command sent 25 | * Python 
tools is now only run in editors specifically running the python grammar 26 | * Fix an issue where report link would not show on package error 27 | * Improvements to test coverage 28 | 29 | 0.6.3 30 | ----- 31 | 32 | * I'll be making an effort to maintain a changelog on each release now so that you guys can actually know what's creeping in with each update. 33 | 34 | * From the initial release, a number of minor bug fixes have been released and the package now has some test coverage to prevent breakages. 35 | 36 | * Notifications are now displayed when no results are returned by Jedi. If you find something is not acting the way you expect it to, it probably is Jedi failing to find what you're looking for. You should probably post an Issue on their page. 37 | 38 | * The README has been updated to explain each feature with a short gif demo . 39 | 40 | * A new feature in the form of "Select String" has been added. See the README for a demo. 41 | -------------------------------------------------------------------------------- /lib/jedi/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Jedi is a static analysis tool for Python that can be used in IDEs/editors. Its 3 | historic focus is autocompletion, but does static analysis for now as well. 4 | Jedi is fast and is very well tested. It understands Python on a deeper level 5 | than all other static analysis frameworks for Python. 6 | 7 | Jedi has support for two different goto functions. It's possible to search for 8 | related names and to list all names in a Python file and infer them. Jedi 9 | understands docstrings and you can use Jedi autocompletion in your REPL as 10 | well. 11 | 12 | Jedi uses a very simple API to connect with IDE's. There's a reference 13 | implementation as a `VIM-Plugin `_, 14 | which uses Jedi's autocompletion. We encourage you to use Jedi in your IDEs. 15 | It's really easy. 
16 | 17 | To give you a simple example how you can use the Jedi library, here is an 18 | example for the autocompletion feature: 19 | 20 | >>> import jedi 21 | >>> source = ''' 22 | ... import datetime 23 | ... datetime.da''' 24 | >>> script = jedi.Script(source, 3, len('datetime.da'), 'example.py') 25 | >>> script 26 | 27 | >>> completions = script.completions() 28 | >>> completions #doctest: +ELLIPSIS 29 | [, , ...] 30 | >>> print(completions[0].complete) 31 | te 32 | >>> print(completions[0].name) 33 | date 34 | 35 | As you see Jedi is pretty simple and allows you to concentrate on writing a 36 | good text editor, while still having very good IDE features for Python. 37 | """ 38 | 39 | __version__ = '0.9.0' 40 | 41 | from jedi.api import Script, Interpreter, NotFoundError, set_debug_function 42 | from jedi.api import preload_module, defined_names, names 43 | from jedi import settings 44 | -------------------------------------------------------------------------------- /lib/jedi/api/usages.py: -------------------------------------------------------------------------------- 1 | from jedi._compatibility import unicode 2 | from jedi.api import classes 3 | from jedi.parser import tree 4 | from jedi.evaluate import imports 5 | 6 | 7 | def usages(evaluator, definition_names, mods): 8 | """ 9 | :param definitions: list of Name 10 | """ 11 | def compare_array(definitions): 12 | """ `definitions` are being compared by module/start_pos, because 13 | sometimes the id's of the objects change (e.g. executions). 
14 | """ 15 | result = [] 16 | for d in definitions: 17 | module = d.get_parent_until() 18 | result.append((module, d.start_pos)) 19 | return result 20 | 21 | search_name = unicode(list(definition_names)[0]) 22 | compare_definitions = compare_array(definition_names) 23 | mods |= set([d.get_parent_until() for d in definition_names]) 24 | definitions = [] 25 | for m in imports.get_modules_containing_name(evaluator, mods, search_name): 26 | try: 27 | check_names = m.used_names[search_name] 28 | except KeyError: 29 | continue 30 | for name in check_names: 31 | 32 | result = evaluator.goto(name) 33 | if [c for c in compare_array(result) if c in compare_definitions]: 34 | definitions.append(classes.Definition(evaluator, name)) 35 | # Previous definitions might be imports, so include them 36 | # (because goto might return that import name). 37 | compare_definitions += compare_array([name]) 38 | return definitions 39 | 40 | 41 | def usages_add_import_modules(evaluator, definitions): 42 | """ Adds the modules of the imports """ 43 | new = set() 44 | for d in definitions: 45 | imp_or_stmt = d.get_definition() 46 | if isinstance(imp_or_stmt, tree.Import): 47 | s = imports.ImportWrapper(evaluator, d) 48 | new |= set(s.follow(is_goto=True)) 49 | return set(definitions) | new 50 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/cache.py: -------------------------------------------------------------------------------- 1 | """ 2 | - the popular ``memoize_default`` works like a typical memoize and returns the 3 | default otherwise. 4 | - ``CachedMetaClass`` uses ``memoize_default`` to do the same with classes. 5 | """ 6 | 7 | import inspect 8 | 9 | NO_DEFAULT = object() 10 | 11 | 12 | def memoize_default(default=NO_DEFAULT, evaluator_is_first_arg=False, second_arg_is_evaluator=False): 13 | """ This is a typical memoization decorator, BUT there is one difference: 14 | To prevent recursion it sets defaults. 
15 | 16 | Preventing recursion is in this case the much bigger use than speed. I 17 | don't think, that there is a big speed difference, but there are many cases 18 | where recursion could happen (think about a = b; b = a). 19 | """ 20 | def func(function): 21 | def wrapper(obj, *args, **kwargs): 22 | if evaluator_is_first_arg: 23 | cache = obj.memoize_cache 24 | elif second_arg_is_evaluator: # needed for meta classes 25 | cache = args[0].memoize_cache 26 | else: 27 | cache = obj._evaluator.memoize_cache 28 | 29 | try: 30 | memo = cache[function] 31 | except KeyError: 32 | memo = {} 33 | cache[function] = memo 34 | 35 | key = (obj, args, frozenset(kwargs.items())) 36 | if key in memo: 37 | return memo[key] 38 | else: 39 | if default is not NO_DEFAULT: 40 | memo[key] = default 41 | rv = function(obj, *args, **kwargs) 42 | if inspect.isgenerator(rv): 43 | rv = list(rv) 44 | memo[key] = rv 45 | return rv 46 | return wrapper 47 | return func 48 | 49 | 50 | class CachedMetaClass(type): 51 | """ 52 | This is basically almost the same than the decorator above, it just caches 53 | class initializations. Either you do it this way or with decorators, but 54 | with decorators you lose class access (isinstance, etc). 
55 | """ 56 | @memoize_default(None, second_arg_is_evaluator=True) 57 | def __call__(self, *args, **kwargs): 58 | return super(CachedMetaClass, self).__call__(*args, **kwargs) 59 | -------------------------------------------------------------------------------- /lib/jedi/api/helpers.py: -------------------------------------------------------------------------------- 1 | """ 2 | Helpers for the API 3 | """ 4 | import re 5 | 6 | from jedi.parser import tree as pt 7 | from jedi.evaluate import imports 8 | 9 | 10 | def completion_parts(path_until_cursor): 11 | """ 12 | Returns the parts for the completion 13 | :return: tuple - (path, dot, like) 14 | """ 15 | match = re.match(r'^(.*?)(\.|)(\w?[\w\d]*)$', path_until_cursor, flags=re.S) 16 | return match.groups() 17 | 18 | 19 | def sorted_definitions(defs): 20 | # Note: `or ''` below is required because `module_path` could be 21 | return sorted(defs, key=lambda x: (x.module_path or '', x.line or 0, x.column or 0)) 22 | 23 | 24 | def get_on_import_stmt(evaluator, user_context, user_stmt, is_like_search=False): 25 | """ 26 | Resolve the user statement, if it is an import. Only resolve the 27 | parts until the user position. 
28 | """ 29 | name = user_stmt.name_for_position(user_context.position) 30 | if name is None: 31 | return None, None 32 | 33 | i = imports.ImportWrapper(evaluator, name) 34 | return i, name 35 | 36 | 37 | def check_error_statements(module, pos): 38 | for error_statement in module.error_statement_stacks: 39 | if error_statement.first_type in ('import_from', 'import_name') \ 40 | and error_statement.first_pos < pos <= error_statement.next_start_pos: 41 | return importer_from_error_statement(error_statement, pos) 42 | return None, 0, False, False 43 | 44 | 45 | def importer_from_error_statement(error_statement, pos): 46 | def check_dotted(children): 47 | for name in children[::2]: 48 | if name.start_pos <= pos: 49 | yield name 50 | 51 | names = [] 52 | level = 0 53 | only_modules = True 54 | unfinished_dotted = False 55 | for typ, nodes in error_statement.stack: 56 | if typ == 'dotted_name': 57 | names += check_dotted(nodes) 58 | if nodes[-1] == '.': 59 | # An unfinished dotted_name 60 | unfinished_dotted = True 61 | elif typ == 'import_name': 62 | if nodes[0].start_pos <= pos <= nodes[0].end_pos: 63 | # We are on the import. 
64 | return None, 0, False, False 65 | elif typ == 'import_from': 66 | for node in nodes: 67 | if node.start_pos >= pos: 68 | break 69 | elif isinstance(node, pt.Node) and node.type == 'dotted_name': 70 | names += check_dotted(node.children) 71 | elif node in ('.', '...'): 72 | level += len(node.value) 73 | elif isinstance(node, pt.Name): 74 | names.append(node) 75 | elif node == 'import': 76 | only_modules = False 77 | 78 | return names, level, only_modules, unfinished_dotted 79 | -------------------------------------------------------------------------------- /lib/jedi/debug.py: -------------------------------------------------------------------------------- 1 | from jedi._compatibility import encoding, is_py3, u 2 | import inspect 3 | import os 4 | import time 5 | 6 | try: 7 | if os.name == 'nt': 8 | # does not work on Windows, as pyreadline and colorama interfere 9 | raise ImportError 10 | else: 11 | # Use colorama for nicer console output. 12 | from colorama import Fore, init 13 | from colorama import initialise 14 | # pytest resets the stream at the end - causes troubles. Since after 15 | # every output the stream is reset automatically we don't need this. 
16 | initialise.atexit_done = True 17 | init() 18 | except ImportError: 19 | class Fore(object): 20 | RED = '' 21 | GREEN = '' 22 | YELLOW = '' 23 | RESET = '' 24 | 25 | NOTICE = object() 26 | WARNING = object() 27 | SPEED = object() 28 | 29 | enable_speed = False 30 | enable_warning = False 31 | enable_notice = False 32 | 33 | # callback, interface: level, str 34 | debug_function = None 35 | ignored_modules = ['jedi.evaluate.builtin', 'jedi.parser'] 36 | _debug_indent = -1 37 | _start_time = time.time() 38 | 39 | 40 | def reset_time(): 41 | global _start_time, _debug_indent 42 | _start_time = time.time() 43 | _debug_indent = -1 44 | 45 | 46 | def increase_indent(func): 47 | """Decorator for makin """ 48 | def wrapper(*args, **kwargs): 49 | global _debug_indent 50 | _debug_indent += 1 51 | try: 52 | result = func(*args, **kwargs) 53 | finally: 54 | _debug_indent -= 1 55 | return result 56 | return wrapper 57 | 58 | 59 | def dbg(message, *args): 60 | """ Looks at the stack, to see if a debug message should be printed. 
""" 61 | if debug_function and enable_notice: 62 | frm = inspect.stack()[1] 63 | mod = inspect.getmodule(frm[0]) 64 | if not (mod.__name__ in ignored_modules): 65 | i = ' ' * _debug_indent 66 | debug_function(NOTICE, i + 'dbg: ' + message % tuple(u(repr(a)) for a in args)) 67 | 68 | 69 | def warning(message, *args): 70 | if debug_function and enable_warning: 71 | i = ' ' * _debug_indent 72 | debug_function(WARNING, i + 'warning: ' + message % tuple(u(repr(a)) for a in args)) 73 | 74 | 75 | def speed(name): 76 | if debug_function and enable_speed: 77 | now = time.time() 78 | i = ' ' * _debug_indent 79 | debug_function(SPEED, i + 'speed: ' + '%s %s' % (name, now - _start_time)) 80 | 81 | 82 | def print_to_stdout(level, str_out): 83 | """ The default debug function """ 84 | if level == NOTICE: 85 | col = Fore.GREEN 86 | elif level == WARNING: 87 | col = Fore.RED 88 | else: 89 | col = Fore.YELLOW 90 | if not is_py3: 91 | str_out = str_out.encode(encoding, 'replace') 92 | print(col + str_out + Fore.RESET) 93 | 94 | 95 | # debug_function = print_to_stdout 96 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/flow_analysis.py: -------------------------------------------------------------------------------- 1 | from jedi.parser import tree 2 | 3 | 4 | class Status(object): 5 | lookup_table = {} 6 | 7 | def __init__(self, value, name): 8 | self._value = value 9 | self._name = name 10 | Status.lookup_table[value] = self 11 | 12 | def invert(self): 13 | if self is REACHABLE: 14 | return UNREACHABLE 15 | elif self is UNREACHABLE: 16 | return REACHABLE 17 | else: 18 | return UNSURE 19 | 20 | def __and__(self, other): 21 | if UNSURE in (self, other): 22 | return UNSURE 23 | else: 24 | return REACHABLE if self._value and other._value else UNREACHABLE 25 | 26 | def __repr__(self): 27 | return '<%s: %s>' % (type(self).__name__, self._name) 28 | 29 | 30 | REACHABLE = Status(True, 'reachable') 31 | UNREACHABLE = Status(False, 
'unreachable') 32 | UNSURE = Status(None, 'unsure') 33 | 34 | 35 | def break_check(evaluator, base_scope, stmt, origin_scope=None): 36 | element_scope = evaluator.wrap(stmt.get_parent_scope(include_flows=True)) 37 | # Direct parents get resolved, we filter scopes that are separate branches. 38 | # This makes sense for autocompletion and static analysis. For actual 39 | # Python it doesn't matter, because we're talking about potentially 40 | # unreachable code. 41 | # e.g. `if 0:` would cause all name lookup within the flow make 42 | # unaccessible. This is not a "problem" in Python, because the code is 43 | # never called. In Jedi though, we still want to infer types. 44 | while origin_scope is not None: 45 | if element_scope == origin_scope: 46 | return REACHABLE 47 | origin_scope = origin_scope.parent 48 | return _break_check(evaluator, stmt, base_scope, element_scope) 49 | 50 | 51 | def _break_check(evaluator, stmt, base_scope, element_scope): 52 | element_scope = evaluator.wrap(element_scope) 53 | base_scope = evaluator.wrap(base_scope) 54 | 55 | reachable = REACHABLE 56 | if isinstance(element_scope, tree.IfStmt): 57 | if element_scope.node_after_else(stmt): 58 | for check_node in element_scope.check_nodes(): 59 | reachable = _check_if(evaluator, check_node) 60 | if reachable in (REACHABLE, UNSURE): 61 | break 62 | reachable = reachable.invert() 63 | else: 64 | node = element_scope.node_in_which_check_node(stmt) 65 | reachable = _check_if(evaluator, node) 66 | elif isinstance(element_scope, (tree.TryStmt, tree.WhileStmt)): 67 | return UNSURE 68 | 69 | # Only reachable branches need to be examined further. 
70 | if reachable in (UNREACHABLE, UNSURE): 71 | return reachable 72 | 73 | if base_scope != element_scope and base_scope != element_scope.parent: 74 | return reachable & _break_check(evaluator, stmt, base_scope, element_scope.parent) 75 | return reachable 76 | 77 | 78 | def _check_if(evaluator, node): 79 | types = evaluator.eval_element(node) 80 | values = set(x.py__bool__() for x in types) 81 | if len(values) == 1: 82 | return Status.lookup_table[values.pop()] 83 | else: 84 | return UNSURE 85 | -------------------------------------------------------------------------------- /lib/jedi/api/keywords.py: -------------------------------------------------------------------------------- 1 | import pydoc 2 | import keyword 3 | 4 | from jedi._compatibility import is_py3 5 | from jedi import common 6 | from jedi.evaluate import compiled 7 | from jedi.evaluate.helpers import FakeName 8 | from jedi.parser.tree import Leaf 9 | try: 10 | from pydoc_data import topics as pydoc_topics 11 | except ImportError: 12 | # Python 2.6 13 | import pydoc_topics 14 | 15 | if is_py3: 16 | keys = keyword.kwlist 17 | else: 18 | keys = keyword.kwlist + ['None', 'False', 'True'] 19 | 20 | 21 | def has_inappropriate_leaf_keyword(pos, module): 22 | relevant_errors = filter( 23 | lambda error: error.first_pos[0] == pos[0], 24 | module.error_statement_stacks) 25 | 26 | for error in relevant_errors: 27 | if error.next_token in keys: 28 | return True 29 | 30 | return False 31 | 32 | def completion_names(evaluator, stmt, pos, module): 33 | keyword_list = all_keywords() 34 | 35 | if not isinstance(stmt, Leaf) or has_inappropriate_leaf_keyword(pos, module): 36 | keyword_list = filter( 37 | lambda keyword: not keyword.only_valid_as_leaf, 38 | keyword_list 39 | ) 40 | return [keyword.name for keyword in keyword_list] 41 | 42 | 43 | def all_keywords(pos=(0,0)): 44 | return set([Keyword(k, pos) for k in keys]) 45 | 46 | 47 | def keyword(string, pos=(0,0)): 48 | if string in keys: 49 | return Keyword(string, 
pos) 50 | else: 51 | return None 52 | 53 | 54 | def get_operator(string, pos): 55 | return Keyword(string, pos) 56 | 57 | 58 | keywords_only_valid_as_leaf = ( 59 | 'continue', 60 | 'break', 61 | ) 62 | 63 | 64 | class Keyword(object): 65 | def __init__(self, name, pos): 66 | self.name = FakeName(name, self, pos) 67 | self.start_pos = pos 68 | self.parent = compiled.builtin 69 | 70 | def get_parent_until(self): 71 | return self.parent 72 | 73 | @property 74 | def only_valid_as_leaf(self): 75 | return self.name.value in keywords_only_valid_as_leaf 76 | 77 | @property 78 | def names(self): 79 | """ For a `parsing.Name` like comparision """ 80 | return [self.name] 81 | 82 | @property 83 | def docstr(self): 84 | return imitate_pydoc(self.name) 85 | 86 | def __repr__(self): 87 | return '<%s: %s>' % (type(self).__name__, self.name) 88 | 89 | 90 | def imitate_pydoc(string): 91 | """ 92 | It's not possible to get the pydoc's without starting the annoying pager 93 | stuff. 94 | """ 95 | # str needed because of possible unicode stuff in py2k (pydoc doesn't work 96 | # with unicode strings) 97 | string = str(string) 98 | h = pydoc.help 99 | with common.ignored(KeyError): 100 | # try to access symbols 101 | string = h.symbols[string] 102 | string, _, related = string.partition(' ') 103 | 104 | get_target = lambda s: h.topics.get(s, h.keywords.get(s)) 105 | while isinstance(string, str): 106 | string = get_target(string) 107 | 108 | try: 109 | # is a tuple now 110 | label, related = string 111 | except TypeError: 112 | return '' 113 | 114 | try: 115 | return pydoc_topics.topics[label] if pydoc_topics else '' 116 | except KeyError: 117 | return '' 118 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/compiled/fake/_sre.pym: -------------------------------------------------------------------------------- 1 | def compile(): 2 | class SRE_Match(): 3 | endpos = int() 4 | lastgroup = int() 5 | lastindex = int() 6 | pos = int() 7 | 
75 |         def split(self, string, maxsplit=0):
79 | """ 80 | return [str()] 81 | 82 | def sub(self, repl, string, count=0): 83 | """ 84 | sub(repl, string[, count = 0]) --> newstring 85 | Return the string obtained by replacing the leftmost non-overlapping 86 | occurrences of pattern in string by the replacement repl. 87 | """ 88 | return str() 89 | 90 | def subn(self, repl, string, count=0): 91 | """ 92 | subn(repl, string[, count = 0]) --> (newstring, number of subs) 93 | Return the tuple (new_string, number_of_subs_made) found by replacing 94 | the leftmost non-overlapping occurrences of pattern with the 95 | replacement repl. 96 | """ 97 | return (str(), int()) 98 | 99 | return SRE_Pattern() 100 | -------------------------------------------------------------------------------- /lib/tools.py: -------------------------------------------------------------------------------- 1 | import io 2 | import json 3 | import os 4 | import sys 5 | import traceback 6 | 7 | # Load jedi library included with this package 8 | sys.path.append(os.path.dirname(__file__)) 9 | 10 | import jedi # noqa 11 | 12 | 13 | class JediTools(object): 14 | def __init__(self): 15 | self.default_sys_path = sys.path 16 | self._input = io.open(sys.stdin.fileno(), encoding='utf-8') 17 | 18 | @classmethod 19 | def _get_top_level_module(cls, path): 20 | """Recursively walk through directories looking for top level module. 21 | 22 | Jedi will use current filepath to look for another modules at same path, 23 | but it will not be able to see modules **above**, so our goal 24 | is to find the higher python module available from filepath. 
86 |                 with open('error.log', 'a') as fp:
32 | Go to the original definition of the symbol under the cursor.
This will open the corresponding file if it is not already open. 33 | 34 | Files which form part of the standard library / are installed as third party modules are still opened, which is a really nice way of analysing and understanding behavior of code. 35 | 36 | ### Select String Contents 37 | Default shortcut: `ctrl+alt+e` 38 | 39 | ![demo](http://i.imgur.com/tUeduTK.gif?1) 40 | 41 | Select the entire contents of the string currently being selected. 42 | 43 | Works with single line as well as block strings. 44 | 45 | #### More tools to come? 46 | Ideas and feature requests are welcome. Some ideas of potential features to be added: 47 | * Swap string delimiters between ' and " 48 | * Show usages and provide the ability to rename across multiple files 49 | * Select entire symbols 50 | 51 | ## Windows Support 52 | Windows should work, however I do not have access to a Windows machine and cannot therefore test out releases. 53 | 54 | I have builds being tested on appveyor which should prevent any obvious errors from causing breakages. I have now also setup a virtual machine to test changes, but a lot of the issues that I would catch through normal day to day usage will not be found and I'll have to rely to the community to make me aware of them. 55 | 56 | If you have any issue running this package on windows then please open an [Issue](https://github.com/michaelaquilina/python-tools/issues). 57 | 58 | Common Problem: "python-tools was unable to find your machine's python executable" 59 | * Make sure python is installed on your machine (jedi is used internally which runs off python) 60 | * Make sure your python executable is added to your PATH environment variable 61 | 62 | ## Work In Progress 63 | 64 | This Atom package is very much a Work In Progress and is far from currently being perfect! There are a lot of things I will be looking to improve. 
65 | 66 | If you find anything which does not seem like expected behavior or have any suggestions, feel free to open an [Issue](https://github.com/michaelaquilina/python-tools/issues) on my Github page. 67 | -------------------------------------------------------------------------------- /lib/jedi/api/interpreter.py: -------------------------------------------------------------------------------- 1 | """ 2 | TODO Some parts of this module are still not well documented. 3 | """ 4 | import inspect 5 | import re 6 | 7 | from jedi._compatibility import builtins 8 | from jedi import debug 9 | from jedi.common import source_to_unicode 10 | from jedi.cache import underscore_memoization 11 | from jedi.evaluate import compiled 12 | from jedi.evaluate.compiled.fake import get_module 13 | from jedi.parser import tree as pt 14 | from jedi.parser import load_grammar 15 | from jedi.parser.fast import FastParser 16 | from jedi.evaluate import helpers 17 | from jedi.evaluate import iterable 18 | from jedi.evaluate import representation as er 19 | 20 | 21 | def add_namespaces_to_parser(evaluator, namespaces, parser_module): 22 | for namespace in namespaces: 23 | for key, value in namespace.items(): 24 | # Name lookups in an ast tree work by checking names_dict. 25 | # Therefore we just add fake names to that and we're done. 26 | arr = parser_module.names_dict.setdefault(key, []) 27 | arr.append(LazyName(evaluator, parser_module, key, value)) 28 | 29 | 30 | class LazyName(helpers.FakeName): 31 | def __init__(self, evaluator, module, name, value): 32 | super(LazyName, self).__init__(name) 33 | self._module = module 34 | self._evaluator = evaluator 35 | self._value = value 36 | self._name = name 37 | 38 | def is_definition(self): 39 | return True 40 | 41 | @property 42 | @underscore_memoization 43 | def parent(self): 44 | """ 45 | Creating fake statements for the interpreter. 
46 | """ 47 | obj = self._value 48 | parser_path = [] 49 | if inspect.ismodule(obj): 50 | module = obj 51 | else: 52 | names = [] 53 | try: 54 | o = obj.__objclass__ 55 | names.append(obj.__name__) 56 | obj = o 57 | except AttributeError: 58 | pass 59 | 60 | try: 61 | module_name = obj.__module__ 62 | names.insert(0, obj.__name__) 63 | except AttributeError: 64 | # Unfortunately in some cases like `int` there's no __module__ 65 | module = builtins 66 | else: 67 | # TODO this import is wrong. Yields x for x.y.z instead of z 68 | module = __import__(module_name) 69 | parser_path = names 70 | raw_module = get_module(self._value) 71 | 72 | found = [] 73 | try: 74 | path = module.__file__ 75 | except AttributeError: 76 | pass 77 | else: 78 | path = re.sub('c$', '', path) 79 | if path.endswith('.py'): 80 | # cut the `c` from `.pyc` 81 | with open(path) as f: 82 | source = source_to_unicode(f.read()) 83 | mod = FastParser(load_grammar(), source, path[:-1]).module 84 | if parser_path: 85 | assert len(parser_path) == 1 86 | found = self._evaluator.find_types(mod, parser_path[0], search_global=True) 87 | else: 88 | found = [self._evaluator.wrap(mod)] 89 | 90 | if not found: 91 | debug.warning('Possibly an interpreter lookup for Python code failed %s', 92 | parser_path) 93 | 94 | if not found: 95 | evaluated = compiled.CompiledObject(obj) 96 | if evaluated == builtins: 97 | # The builtins module is special and always cached. 
98 | evaluated = compiled.builtin 99 | found = [evaluated] 100 | 101 | content = iterable.AlreadyEvaluated(found) 102 | stmt = pt.ExprStmt([self, pt.Operator(pt.zero_position_modifier, 103 | '=', (0, 0), ''), content]) 104 | stmt.parent = self._module 105 | return stmt 106 | 107 | @parent.setter 108 | def parent(self, value): 109 | """Needed because the super class tries to set parent.""" 110 | -------------------------------------------------------------------------------- /lib/jedi/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Utilities for end-users. 3 | """ 4 | 5 | from __future__ import absolute_import 6 | import __main__ 7 | from collections import namedtuple 8 | import re 9 | import os 10 | import sys 11 | 12 | from jedi import Interpreter 13 | from jedi.api.helpers import completion_parts 14 | from jedi.parser.user_context import UserContext 15 | 16 | 17 | def setup_readline(namespace_module=__main__): 18 | """ 19 | Install Jedi completer to :mod:`readline`. 20 | 21 | This function setups :mod:`readline` to use Jedi in Python interactive 22 | shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically 23 | ``$HOME/.pythonrc.py``), you can add this piece of code:: 24 | 25 | try: 26 | from jedi.utils import setup_readline 27 | setup_readline() 28 | except ImportError: 29 | # Fallback to the stdlib readline completer if it is installed. 30 | # Taken from http://docs.python.org/2/library/rlcompleter.html 31 | print("Jedi is not installed, falling back to readline") 32 | try: 33 | import readline 34 | import rlcompleter 35 | readline.parse_and_bind("tab: complete") 36 | except ImportError: 37 | print("Readline is not installed either. No tab completion is enabled.") 38 | 39 | This will fallback to the readline completer if Jedi is not installed. 
40 | The readline completer will only complete names in the global namespace, 41 | so for example:: 42 | 43 | ran 44 | 45 | will complete to ``range`` 46 | 47 | with both Jedi and readline, but:: 48 | 49 | range(10).cou 50 | 51 | will show complete to ``range(10).count`` only with Jedi. 52 | 53 | You'll also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to 54 | your shell profile (usually ``.bash_profile`` or ``.profile`` if you use 55 | bash). 56 | 57 | """ 58 | class JediRL(object): 59 | def complete(self, text, state): 60 | """ 61 | This complete stuff is pretty weird, a generator would make 62 | a lot more sense, but probably due to backwards compatibility 63 | this is still the way how it works. 64 | 65 | The only important part is stuff in the ``state == 0`` flow, 66 | everything else has been copied from the ``rlcompleter`` std. 67 | library module. 68 | """ 69 | if state == 0: 70 | sys.path.insert(0, os.getcwd()) 71 | # Calling python doesn't have a path, so add to sys.path. 
72 | try: 73 | interpreter = Interpreter(text, [namespace_module.__dict__]) 74 | 75 | path = UserContext(text, (1, len(text))).get_path_until_cursor() 76 | path, dot, like = completion_parts(path) 77 | before = text[:len(text) - len(like)] 78 | completions = interpreter.completions() 79 | finally: 80 | sys.path.pop(0) 81 | 82 | self.matches = [before + c.name_with_symbols for c in completions] 83 | try: 84 | return self.matches[state] 85 | except IndexError: 86 | return None 87 | 88 | try: 89 | import readline 90 | except ImportError: 91 | print("Module readline not available.") 92 | else: 93 | readline.set_completer(JediRL().complete) 94 | readline.parse_and_bind("tab: complete") 95 | # jedi itself does the case matching 96 | readline.parse_and_bind("set completion-ignore-case on") 97 | # because it's easier to hit the tab just once 98 | readline.parse_and_bind("set show-all-if-unmodified") 99 | readline.parse_and_bind("set show-all-if-ambiguous on") 100 | # don't repeat all the things written in the readline all the time 101 | readline.parse_and_bind("set completion-prefix-display-length 2") 102 | # No delimiters, Jedi handles that. 103 | readline.set_completer_delims('') 104 | 105 | 106 | def version_info(): 107 | """ 108 | Returns a namedtuple of Jedi's version, similar to Python's 109 | ``sys.version_info``. 110 | """ 111 | Version = namedtuple('Version', 'major, minor, micro') 112 | from jedi import __version__ 113 | tupl = re.findall('[a-z]+|\d+', __version__) 114 | return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)]) 115 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/compiled/fake.py: -------------------------------------------------------------------------------- 1 | """ 2 | Loads functions that are mixed in to the standard library. E.g. builtins are 3 | written in C (binaries), but my autocompletion only understands Python code. 
By 4 | mixing in Python code, the autocompletion should work much better for builtins. 5 | """ 6 | 7 | import os 8 | import inspect 9 | 10 | from jedi._compatibility import is_py3, builtins, unicode 11 | from jedi.parser import Parser, load_grammar 12 | from jedi.parser import tree as pt 13 | from jedi.evaluate.helpers import FakeName 14 | 15 | modules = {} 16 | 17 | 18 | def _load_faked_module(module): 19 | module_name = module.__name__ 20 | if module_name == '__builtin__' and not is_py3: 21 | module_name = 'builtins' 22 | 23 | try: 24 | return modules[module_name] 25 | except KeyError: 26 | path = os.path.dirname(os.path.abspath(__file__)) 27 | try: 28 | with open(os.path.join(path, 'fake', module_name) + '.pym') as f: 29 | source = f.read() 30 | except IOError: 31 | modules[module_name] = None 32 | return 33 | grammar = load_grammar('grammar3.4') 34 | module = Parser(grammar, unicode(source), module_name).module 35 | modules[module_name] = module 36 | 37 | if module_name == 'builtins' and not is_py3: 38 | # There are two implementations of `open` for either python 2/3. 39 | # -> Rename the python2 version (`look at fake/builtins.pym`). 40 | open_func = search_scope(module, 'open') 41 | open_func.children[1] = FakeName('open_python3') 42 | open_func = search_scope(module, 'open_python2') 43 | open_func.children[1] = FakeName('open') 44 | return module 45 | 46 | 47 | def search_scope(scope, obj_name): 48 | for s in scope.subscopes: 49 | if str(s.name) == obj_name: 50 | return s 51 | 52 | 53 | def get_module(obj): 54 | if inspect.ismodule(obj): 55 | return obj 56 | try: 57 | obj = obj.__objclass__ 58 | except AttributeError: 59 | pass 60 | 61 | try: 62 | imp_plz = obj.__module__ 63 | except AttributeError: 64 | # Unfortunately in some cases like `int` there's no __module__ 65 | return builtins 66 | else: 67 | return __import__(imp_plz) 68 | 69 | 70 | def _faked(module, obj, name): 71 | # Crazy underscore actions to try to escape all the internal madness. 
72 | if module is None: 73 | module = get_module(obj) 74 | 75 | faked_mod = _load_faked_module(module) 76 | if faked_mod is None: 77 | return 78 | 79 | # Having the module as a `parser.representation.module`, we need to scan 80 | # for methods. 81 | if name is None: 82 | if inspect.isbuiltin(obj): 83 | return search_scope(faked_mod, obj.__name__) 84 | elif not inspect.isclass(obj): 85 | # object is a method or descriptor 86 | cls = search_scope(faked_mod, obj.__objclass__.__name__) 87 | if cls is None: 88 | return 89 | return search_scope(cls, obj.__name__) 90 | else: 91 | if obj == module: 92 | return search_scope(faked_mod, name) 93 | else: 94 | cls = search_scope(faked_mod, obj.__name__) 95 | if cls is None: 96 | return 97 | return search_scope(cls, name) 98 | 99 | 100 | def get_faked(module, obj, name=None): 101 | obj = obj.__class__ if is_class_instance(obj) else obj 102 | result = _faked(module, obj, name) 103 | if result is None or isinstance(result, pt.Class): 104 | # We're not interested in classes. What we want is functions. 105 | return None 106 | else: 107 | # Set the docstr which was previously not set (faked modules don't 108 | # contain it). 109 | doc = '"""%s"""' % obj.__doc__ # TODO need escapes. 
110 | suite = result.children[-1] 111 | string = pt.String(pt.zero_position_modifier, doc, (0, 0), '') 112 | new_line = pt.Whitespace('\n', (0, 0), '') 113 | docstr_node = pt.Node('simple_stmt', [string, new_line]) 114 | suite.children.insert(2, docstr_node) 115 | return result 116 | 117 | 118 | def is_class_instance(obj): 119 | """Like inspect.* methods.""" 120 | return not (inspect.isclass(obj) or inspect.ismodule(obj) 121 | or inspect.isbuiltin(obj) or inspect.ismethod(obj) 122 | or inspect.ismethoddescriptor(obj) or inspect.iscode(obj) 123 | or inspect.isgenerator(obj)) 124 | -------------------------------------------------------------------------------- /lib/jedi/parser/pgen2/grammar.py: -------------------------------------------------------------------------------- 1 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | # Modifications: 5 | # Copyright 2014 David Halter. Integration into Jedi. 6 | # Modifications are dual-licensed: MIT and PSF. 7 | 8 | """This module defines the data structures used to represent a grammar. 9 | 10 | These are a bit arcane because they are derived from the data 11 | structures used by Python's 'pgen' parser generator. 12 | 13 | There's also a table here mapping operators to their names in the 14 | token module; the Python tokenize module reports all operators as the 15 | fallback token code OP, but the parser needs the actual token code. 16 | 17 | """ 18 | 19 | # Python imports 20 | import pickle 21 | 22 | 23 | class Grammar(object): 24 | """Pgen parsing tables conversion class. 25 | 26 | Once initialized, this class supplies the grammar tables for the 27 | parsing engine implemented by parse.py. The parsing engine 28 | accesses the instance variables directly. The class here does not 29 | provide initialization of the tables; several subclasses exist to 30 | do this (see the conv and pgen modules). 
31 | 32 | The load() method reads the tables from a pickle file, which is 33 | much faster than the other ways offered by subclasses. The pickle 34 | file is written by calling dump() (after loading the grammar 35 | tables using a subclass). The report() method prints a readable 36 | representation of the tables to stdout, for debugging. 37 | 38 | The instance variables are as follows: 39 | 40 | symbol2number -- a dict mapping symbol names to numbers. Symbol 41 | numbers are always 256 or higher, to distinguish 42 | them from token numbers, which are between 0 and 43 | 255 (inclusive). 44 | 45 | number2symbol -- a dict mapping numbers to symbol names; 46 | these two are each other's inverse. 47 | 48 | states -- a list of DFAs, where each DFA is a list of 49 | states, each state is a list of arcs, and each 50 | arc is a (i, j) pair where i is a label and j is 51 | a state number. The DFA number is the index into 52 | this list. (This name is slightly confusing.) 53 | Final states are represented by a special arc of 54 | the form (0, j) where j is its own state number. 55 | 56 | dfas -- a dict mapping symbol numbers to (DFA, first) 57 | pairs, where DFA is an item from the states list 58 | above, and first is a set of tokens that can 59 | begin this grammar rule (represented by a dict 60 | whose values are always 1). 61 | 62 | labels -- a list of (x, y) pairs where x is either a token 63 | number or a symbol number, and y is either None 64 | or a string; the strings are keywords. The label 65 | number is the index in this list; label numbers 66 | are used to mark state transitions (arcs) in the 67 | DFAs. 68 | 69 | start -- the number of the grammar's start symbol. 70 | 71 | keywords -- a dict mapping keyword strings to arc labels. 72 | 73 | tokens -- a dict mapping token numbers to arc labels. 
 9 | There's no doubt whether bar is an ``int`` or not, but if there's also a call
50 |     It is not known what the type ``foo`` is without analysing the whole code. You
78 | """ 79 | @memoize_default([], evaluator_is_first_arg=True) 80 | def get_posibilities(evaluator, module, func_name): 81 | try: 82 | names = module.used_names[func_name] 83 | except KeyError: 84 | return [] 85 | 86 | for name in names: 87 | parent = name.parent 88 | if tree.is_node(parent, 'trailer'): 89 | parent = parent.parent 90 | 91 | trailer = None 92 | if tree.is_node(parent, 'power'): 93 | for t in parent.children[1:]: 94 | if t == '**': 95 | break 96 | if t.start_pos > name.start_pos and t.children[0] == '(': 97 | trailer = t 98 | break 99 | if trailer is not None: 100 | types = evaluator.goto_definition(name) 101 | 102 | # We have to remove decorators, because they are not the 103 | # "original" functions, this way we can easily compare. 104 | # At the same time we also have to remove InstanceElements. 105 | undec = [] 106 | for escope in types: 107 | if escope.isinstance(er.Function, er.Instance) \ 108 | and escope.decorates is not None: 109 | undec.append(escope.decorates) 110 | elif isinstance(escope, er.InstanceElement): 111 | undec.append(escope.var) 112 | else: 113 | undec.append(escope) 114 | 115 | if evaluator.wrap(compare) in undec: 116 | # Only if we have the correct function we execute 117 | # it, otherwise just ignore it. 118 | evaluator.eval_trailer(types, trailer) 119 | return listener.param_possibilities 120 | return get_posibilities(evaluator, module, func_name) 121 | 122 | current_module = func.get_parent_until() 123 | func_name = unicode(func.name) 124 | compare = func 125 | if func_name == '__init__': 126 | cls = func.get_parent_scope() 127 | if isinstance(cls, tree.Class): 128 | func_name = unicode(cls.name) 129 | compare = cls 130 | 131 | # add the listener 132 | listener = ParamListener() 133 | func.listeners.add(listener) 134 | 135 | try: 136 | result = [] 137 | # This is like backtracking: Get the first possible result. 
138 | for mod in imports.get_modules_containing_name(evaluator, [current_module], func_name): 139 | result = get_params_for_module(mod) 140 | if result: 141 | break 142 | finally: 143 | # cleanup: remove the listener; important: should not stick. 144 | func.listeners.remove(listener) 145 | 146 | return result 147 | -------------------------------------------------------------------------------- /lib/jedi/common.py: -------------------------------------------------------------------------------- 1 | """ A universal module with functions / classes without dependencies. """ 2 | import sys 3 | import contextlib 4 | import functools 5 | import re 6 | from ast import literal_eval 7 | 8 | from jedi._compatibility import unicode, reraise 9 | from jedi import settings 10 | 11 | 12 | class UncaughtAttributeError(Exception): 13 | """ 14 | Important, because `__getattr__` and `hasattr` catch AttributeErrors 15 | implicitly. This is really evil (mainly because of `__getattr__`). 16 | `hasattr` in Python 2 is even more evil, because it catches ALL exceptions. 17 | Therefore this class originally had to be derived from `BaseException` 18 | instead of `Exception`. But because I removed relevant `hasattr` from 19 | the code base, we can now switch back to `Exception`. 20 | 21 | :param base: return values of sys.exc_info(). 22 | """ 23 | 24 | 25 | def safe_property(func): 26 | return property(reraise_uncaught(func)) 27 | 28 | 29 | def reraise_uncaught(func): 30 | """ 31 | Re-throw uncaught `AttributeError`. 32 | 33 | Usage: Put ``@rethrow_uncaught`` in front of the function 34 | which does **not** suppose to raise `AttributeError`. 35 | 36 | AttributeError is easily get caught by `hasattr` and another 37 | ``except AttributeError`` clause. This becomes problem when you use 38 | a lot of "dynamic" attributes (e.g., using ``@property``) because you 39 | can't distinguish if the property does not exist for real or some code 40 | inside of the "dynamic" attribute through that error. 
In a well 41 | written code, such error should not exist but getting there is very 42 | difficult. This decorator is to help us getting there by changing 43 | `AttributeError` to `UncaughtAttributeError` to avoid unexpected catch. 44 | This helps us noticing bugs earlier and facilitates debugging. 45 | 46 | .. note:: Treating StopIteration here is easy. 47 | Add that feature when needed. 48 | """ 49 | @functools.wraps(func) 50 | def wrapper(*args, **kwds): 51 | try: 52 | return func(*args, **kwds) 53 | except AttributeError: 54 | exc_info = sys.exc_info() 55 | reraise(UncaughtAttributeError(exc_info[1]), exc_info[2]) 56 | return wrapper 57 | 58 | 59 | class PushBackIterator(object): 60 | def __init__(self, iterator): 61 | self.pushes = [] 62 | self.iterator = iterator 63 | self.current = None 64 | 65 | def push_back(self, value): 66 | self.pushes.append(value) 67 | 68 | def __iter__(self): 69 | return self 70 | 71 | def next(self): 72 | """ Python 2 Compatibility """ 73 | return self.__next__() 74 | 75 | def __next__(self): 76 | if self.pushes: 77 | self.current = self.pushes.pop() 78 | else: 79 | self.current = next(self.iterator) 80 | return self.current 81 | 82 | 83 | @contextlib.contextmanager 84 | def scale_speed_settings(factor): 85 | a = settings.max_executions 86 | b = settings.max_until_execution_unique 87 | settings.max_executions *= factor 88 | settings.max_until_execution_unique *= factor 89 | try: 90 | yield 91 | finally: 92 | settings.max_executions = a 93 | settings.max_until_execution_unique = b 94 | 95 | 96 | def indent_block(text, indention=' '): 97 | """This function indents a text block with a default of four spaces.""" 98 | temp = '' 99 | while text and text[-1] == '\n': 100 | temp += text[-1] 101 | text = text[:-1] 102 | lines = text.split('\n') 103 | return '\n'.join(map(lambda s: indention + s, lines)) + temp 104 | 105 | 106 | @contextlib.contextmanager 107 | def ignored(*exceptions): 108 | """ 109 | Context manager that ignores all of the 
specified exceptions. This will 110 | be in the standard library starting with Python 3.4. 111 | """ 112 | try: 113 | yield 114 | except exceptions: 115 | pass 116 | 117 | 118 | def source_to_unicode(source, encoding=None): 119 | def detect_encoding(): 120 | """ 121 | For the implementation of encoding definitions in Python, look at: 122 | - http://www.python.org/dev/peps/pep-0263/ 123 | - http://docs.python.org/2/reference/lexical_analysis.html#encoding-declarations 124 | """ 125 | byte_mark = literal_eval(r"b'\xef\xbb\xbf'") 126 | if source.startswith(byte_mark): 127 | # UTF-8 byte-order mark 128 | return 'utf-8' 129 | 130 | first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', str(source)).group(0) 131 | possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)", 132 | first_two_lines) 133 | if possible_encoding: 134 | return possible_encoding.group(1) 135 | else: 136 | # the default if nothing else has been set -> PEP 263 137 | return encoding if encoding is not None else 'iso-8859-1' 138 | 139 | if isinstance(source, unicode): 140 | # only cast str/bytes 141 | return source 142 | 143 | # cast to unicode by default 144 | return unicode(source, detect_encoding(), 'replace') 145 | 146 | 147 | def splitlines(string): 148 | """ 149 | A splitlines for Python code. In contrast to Python's ``str.splitlines``, 150 | looks at form feeds and other special characters as normal text. Just 151 | splits ``\n`` and ``\r\n``. 152 | Also different: Returns ``['']`` for an empty string input. 153 | """ 154 | return re.split('\n|\r\n', string) 155 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/recursion.py: -------------------------------------------------------------------------------- 1 | """ 2 | Recursions are the recipe of |jedi| to conquer Python code. However, someone 3 | must stop recursions going mad. Some settings are here to make |jedi| stop at 4 | the right time. You can read more about them :ref:`here `. 
5 | 6 | Next to :mod:`jedi.evaluate.cache` this module also makes |jedi| not 7 | thread-safe. Why? ``execution_recursion_decorator`` uses class variables to 8 | count the function calls. 9 | """ 10 | from jedi import debug 11 | from jedi import settings 12 | from jedi.evaluate import compiled 13 | from jedi.evaluate import iterable 14 | 15 | 16 | def recursion_decorator(func): 17 | def run(evaluator, stmt, *args, **kwargs): 18 | rec_detect = evaluator.recursion_detector 19 | # print stmt, len(self.node_statements()) 20 | if rec_detect.push_stmt(stmt): 21 | return [] 22 | else: 23 | result = func(evaluator, stmt, *args, **kwargs) 24 | rec_detect.pop_stmt() 25 | return result 26 | return run 27 | 28 | 29 | class RecursionDetector(object): 30 | """ 31 | A decorator to detect recursions in statements. In a recursion a statement 32 | at the same place, in the same module may not be executed two times. 33 | """ 34 | def __init__(self): 35 | self.top = None 36 | self.current = None 37 | 38 | def push_stmt(self, stmt): 39 | self.current = _RecursionNode(stmt, self.current) 40 | check = self._check_recursion() 41 | if check: 42 | debug.warning('catched stmt recursion: %s against %s @%s', stmt, 43 | check.stmt, stmt.start_pos) 44 | self.pop_stmt() 45 | return True 46 | return False 47 | 48 | def pop_stmt(self): 49 | if self.current is not None: 50 | # I don't know how current can be None, but sometimes it happens 51 | # with Python3. 52 | self.current = self.current.parent 53 | 54 | def _check_recursion(self): 55 | test = self.current 56 | while True: 57 | test = test.parent 58 | if self.current == test: 59 | return test 60 | if not test: 61 | return False 62 | 63 | def node_statements(self): 64 | result = [] 65 | n = self.current 66 | while n: 67 | result.insert(0, n.stmt) 68 | n = n.parent 69 | return result 70 | 71 | 72 | class _RecursionNode(object): 73 | """ A node of the RecursionDecorator. 
""" 74 | def __init__(self, stmt, parent): 75 | self.script = stmt.get_parent_until() 76 | self.position = stmt.start_pos 77 | self.parent = parent 78 | self.stmt = stmt 79 | 80 | # Don't check param instances, they are not causing recursions 81 | # The same's true for the builtins, because the builtins are really 82 | # simple. 83 | self.is_ignored = self.script == compiled.builtin 84 | 85 | def __eq__(self, other): 86 | if not other: 87 | return None 88 | 89 | return self.script == other.script \ 90 | and self.position == other.position \ 91 | and not self.is_ignored and not other.is_ignored 92 | 93 | 94 | def execution_recursion_decorator(func): 95 | def run(execution, **kwargs): 96 | detector = execution._evaluator.execution_recursion_detector 97 | if detector.push_execution(execution): 98 | result = [] 99 | else: 100 | result = func(execution, **kwargs) 101 | detector.pop_execution() 102 | return result 103 | 104 | return run 105 | 106 | 107 | class ExecutionRecursionDetector(object): 108 | """ 109 | Catches recursions of executions. 110 | It is designed like a Singelton. Only one instance should exist. 
111 | """ 112 | def __init__(self): 113 | self.recursion_level = 0 114 | self.parent_execution_funcs = [] 115 | self.execution_funcs = set() 116 | self.execution_count = 0 117 | 118 | def __call__(self, execution): 119 | debug.dbg('Execution recursions: %s', execution, self.recursion_level, 120 | self.execution_count, len(self.execution_funcs)) 121 | if self.check_recursion(execution): 122 | result = [] 123 | else: 124 | result = self.func(execution) 125 | self.pop_execution() 126 | return result 127 | 128 | def pop_execution(cls): 129 | cls.parent_execution_funcs.pop() 130 | cls.recursion_level -= 1 131 | 132 | def push_execution(cls, execution): 133 | in_par_execution_funcs = execution.base in cls.parent_execution_funcs 134 | in_execution_funcs = execution.base in cls.execution_funcs 135 | cls.recursion_level += 1 136 | cls.execution_count += 1 137 | cls.execution_funcs.add(execution.base) 138 | cls.parent_execution_funcs.append(execution.base) 139 | 140 | if cls.execution_count > settings.max_executions: 141 | return True 142 | 143 | if isinstance(execution.base, (iterable.Array, iterable.Generator)): 144 | return False 145 | module = execution.get_parent_until() 146 | if module == compiled.builtin: 147 | return False 148 | 149 | if in_par_execution_funcs: 150 | if cls.recursion_level > settings.max_function_recursion_level: 151 | return True 152 | if in_execution_funcs and \ 153 | len(cls.execution_funcs) > settings.max_until_execution_unique: 154 | return True 155 | if cls.execution_count > settings.max_executions_without_builtins: 156 | return True 157 | return False 158 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/helpers.py: -------------------------------------------------------------------------------- 1 | import copy 2 | from itertools import chain 3 | 4 | from jedi.parser import tree 5 | 6 | 7 | def deep_ast_copy(obj, parent=None, new_elements=None): 8 | """ 9 | Much, much faster than 
copy.deepcopy, but just for Parser elements (Doesn't 10 | copy parents). 11 | """ 12 | 13 | if new_elements is None: 14 | new_elements = {} 15 | 16 | def copy_node(obj): 17 | # If it's already in the cache, just return it. 18 | try: 19 | return new_elements[obj] 20 | except KeyError: 21 | # Actually copy and set attributes. 22 | new_obj = copy.copy(obj) 23 | new_elements[obj] = new_obj 24 | 25 | # Copy children 26 | new_children = [] 27 | for child in obj.children: 28 | typ = child.type 29 | if typ in ('whitespace', 'operator', 'keyword', 'number', 'string'): 30 | # At the moment we're not actually copying those primitive 31 | # elements, because there's really no need to. The parents are 32 | # obviously wrong, but that's not an issue. 33 | new_child = child 34 | elif typ == 'name': 35 | new_elements[child] = new_child = copy.copy(child) 36 | new_child.parent = new_obj 37 | else: # Is a BaseNode. 38 | new_child = copy_node(child) 39 | new_child.parent = new_obj 40 | new_children.append(new_child) 41 | new_obj.children = new_children 42 | 43 | # Copy the names_dict (if there is one). 44 | try: 45 | names_dict = obj.names_dict 46 | except AttributeError: 47 | pass 48 | else: 49 | try: 50 | new_obj.names_dict = new_names_dict = {} 51 | except AttributeError: # Impossible to set CompFor.names_dict 52 | pass 53 | else: 54 | for string, names in names_dict.items(): 55 | new_names_dict[string] = [new_elements[n] for n in names] 56 | return new_obj 57 | 58 | if obj.type == 'name': 59 | # Special case of a Name object. 60 | new_elements[obj] = new_obj = copy.copy(obj) 61 | if parent is not None: 62 | new_obj.parent = parent 63 | elif isinstance(obj, tree.BaseNode): 64 | new_obj = copy_node(obj) 65 | if parent is not None: 66 | for child in new_obj.children: 67 | if isinstance(child, (tree.Name, tree.BaseNode)): 68 | child.parent = parent 69 | else: # String literals and so on. 70 | new_obj = obj # Good enough, don't need to copy anything. 
71 | return new_obj 72 | 73 | 74 | def call_of_name(name, cut_own_trailer=False): 75 | """ 76 | Creates a "call" node that consist of all ``trailer`` and ``power`` 77 | objects. E.g. if you call it with ``append``:: 78 | 79 | list([]).append(3) or None 80 | 81 | You would get a node with the content ``list([]).append`` back. 82 | 83 | This generates a copy of the original ast node. 84 | """ 85 | par = name 86 | if tree.is_node(par.parent, 'trailer'): 87 | par = par.parent 88 | 89 | power = par.parent 90 | if tree.is_node(power, 'power') and power.children[0] != name \ 91 | and not (power.children[-2] == '**' and 92 | name.start_pos > power.children[-1].start_pos): 93 | par = power 94 | # Now the name must be part of a trailer 95 | index = par.children.index(name.parent) 96 | if index != len(par.children) - 1 or cut_own_trailer: 97 | # Now we have to cut the other trailers away. 98 | par = deep_ast_copy(par) 99 | if not cut_own_trailer: 100 | # Normally we would remove just the stuff after the index, but 101 | # if the option is set remove the index as well. (for goto) 102 | index = index + 1 103 | par.children[index:] = [] 104 | 105 | return par 106 | 107 | 108 | def get_module_names(module, all_scopes): 109 | """ 110 | Returns a dictionary with name parts as keys and their call paths as 111 | values. 
112 | """ 113 | if all_scopes: 114 | dct = module.used_names 115 | else: 116 | dct = module.names_dict 117 | return chain.from_iterable(dct.values()) 118 | 119 | 120 | class FakeImport(tree.ImportName): 121 | def __init__(self, name, parent, level=0): 122 | super(FakeImport, self).__init__([]) 123 | self.parent = parent 124 | self._level = level 125 | self.name = name 126 | 127 | def get_defined_names(self): 128 | return [self.name] 129 | 130 | def aliases(self): 131 | return {} 132 | 133 | @property 134 | def level(self): 135 | return self._level 136 | 137 | @property 138 | def start_pos(self): 139 | return 0, 0 140 | 141 | def paths(self): 142 | return [[self.name]] 143 | 144 | def is_definition(self): 145 | return True 146 | 147 | 148 | class FakeName(tree.Name): 149 | def __init__(self, name_str, parent=None, start_pos=(0, 0), is_definition=None): 150 | """ 151 | In case is_definition is defined (not None), that bool value will be 152 | returned. 153 | """ 154 | super(FakeName, self).__init__(tree.zero_position_modifier, name_str, start_pos) 155 | self.parent = parent 156 | self._is_definition = is_definition 157 | 158 | def get_definition(self): 159 | return self.parent 160 | 161 | def is_definition(self): 162 | if self._is_definition is None: 163 | return super(FakeName, self).is_definition() 164 | else: 165 | return self._is_definition 166 | 167 | 168 | class LazyName(FakeName): 169 | def __init__(self, name, parent_callback, is_definition=None): 170 | super(LazyName, self).__init__(name, is_definition=is_definition) 171 | self._parent_callback = parent_callback 172 | 173 | @property 174 | def parent(self): 175 | return self._parent_callback() 176 | 177 | @parent.setter 178 | def parent(self, value): 179 | pass # Do nothing, super classes can try to set the parent. 
180 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/compiled/fake/builtins.pym: -------------------------------------------------------------------------------- 1 | """ 2 | Pure Python implementation of some builtins. 3 | This code is not going to be executed anywhere. 4 | These implementations are not always correct, but should work as good as 5 | possible for the auto completion. 6 | """ 7 | 8 | 9 | def next(iterator, default=None): 10 | if random.choice([0, 1]): 11 | if hasattr("next"): 12 | return iterator.next() 13 | else: 14 | return iterator.__next__() 15 | else: 16 | if default is not None: 17 | return default 18 | 19 | 20 | def iter(collection, sentinel=None): 21 | if sentinel: 22 | yield collection() 23 | else: 24 | for c in collection: 25 | yield c 26 | 27 | 28 | def range(start, stop=None, step=1): 29 | return [0] 30 | 31 | 32 | class file(): 33 | def __iter__(self): 34 | yield '' 35 | def next(self): 36 | return '' 37 | 38 | 39 | class xrange(): 40 | # Attention: this function doesn't exist in Py3k (there it is range). 
41 | def __iter__(self): 42 | yield 1 43 | 44 | def count(self): 45 | return 1 46 | 47 | def index(self): 48 | return 1 49 | 50 | 51 | def open(file, mode='r', buffering=-1, encoding=None, errors=None, newline=None, closefd=True): 52 | import io 53 | return io.TextIOWrapper(file, mode, buffering, encoding, errors, newline, closefd) 54 | 55 | 56 | def open_python2(name, mode=None, buffering=None): 57 | return file(name, mode, buffering) 58 | 59 | 60 | #-------------------------------------------------------- 61 | # descriptors 62 | #-------------------------------------------------------- 63 | class property(): 64 | def __init__(self, fget, fset=None, fdel=None, doc=None): 65 | self.fget = fget 66 | self.fset = fset 67 | self.fdel = fdel 68 | self.__doc__ = doc 69 | 70 | def __get__(self, obj, cls): 71 | return self.fget(obj) 72 | 73 | def __set__(self, obj, value): 74 | self.fset(obj, value) 75 | 76 | def __delete__(self, obj): 77 | self.fdel(obj) 78 | 79 | def setter(self, func): 80 | self.fset = func 81 | return self 82 | 83 | def getter(self, func): 84 | self.fget = func 85 | return self 86 | 87 | def deleter(self, func): 88 | self.fdel = func 89 | return self 90 | 91 | 92 | class staticmethod(): 93 | def __init__(self, func): 94 | self.__func = func 95 | 96 | def __get__(self, obj, cls): 97 | return self.__func 98 | 99 | 100 | class classmethod(): 101 | def __init__(self, func): 102 | self.__func = func 103 | 104 | def __get__(self, obj, cls): 105 | def _method(*args, **kwargs): 106 | return self.__func(cls, *args, **kwargs) 107 | return _method 108 | 109 | 110 | #-------------------------------------------------------- 111 | # array stuff 112 | #-------------------------------------------------------- 113 | class list(): 114 | def __init__(self, iterable=[]): 115 | self.__iterable = [] 116 | for i in iterable: 117 | self.__iterable += [i] 118 | 119 | def __iter__(self): 120 | for i in self.__iterable: 121 | yield i 122 | 123 | def __getitem__(self, y): 124 | 
return self.__iterable[y] 125 | 126 | def pop(self): 127 | return self.__iterable[-1] 128 | 129 | 130 | class tuple(): 131 | def __init__(self, iterable=[]): 132 | self.__iterable = [] 133 | for i in iterable: 134 | self.__iterable += [i] 135 | 136 | def __iter__(self): 137 | for i in self.__iterable: 138 | yield i 139 | 140 | def __getitem__(self, y): 141 | return self.__iterable[y] 142 | 143 | def index(self): 144 | return 1 145 | 146 | def count(self): 147 | return 1 148 | 149 | 150 | class set(): 151 | def __init__(self, iterable=[]): 152 | self.__iterable = iterable 153 | 154 | def __iter__(self): 155 | for i in self.__iterable: 156 | yield i 157 | 158 | def pop(self): 159 | return list(self.__iterable)[-1] 160 | 161 | def copy(self): 162 | return self 163 | 164 | def difference(self, other): 165 | return self - other 166 | 167 | def intersection(self, other): 168 | return self & other 169 | 170 | def symmetric_difference(self, other): 171 | return self ^ other 172 | 173 | def union(self, other): 174 | return self | other 175 | 176 | 177 | class frozenset(): 178 | def __init__(self, iterable=[]): 179 | self.__iterable = iterable 180 | 181 | def __iter__(self): 182 | for i in self.__iterable: 183 | yield i 184 | 185 | def copy(self): 186 | return self 187 | 188 | 189 | class dict(): 190 | def __init__(self, **elements): 191 | self.__elements = elements 192 | 193 | def clear(self): 194 | # has a strange docstr 195 | pass 196 | 197 | def get(self, k, d=None): 198 | # TODO implement 199 | try: 200 | #return self.__elements[k] 201 | pass 202 | except KeyError: 203 | return d 204 | 205 | def setdefault(self, k, d): 206 | # TODO maybe also return the content 207 | return d 208 | 209 | 210 | class enumerate(): 211 | def __init__(self, sequence, start=0): 212 | self.__sequence = sequence 213 | 214 | def __iter__(self): 215 | for i in self.__sequence: 216 | yield 1, i 217 | 218 | def __next__(self): 219 | return next(self.__iter__()) 220 | 221 | def next(self): 222 | 
return next(self.__iter__()) 223 | 224 | 225 | class reversed(): 226 | def __init__(self, sequence): 227 | self.__sequence = sequence 228 | 229 | def __iter__(self): 230 | for i in self.__sequence: 231 | yield i 232 | 233 | def __next__(self): 234 | return next(self.__iter__()) 235 | 236 | def next(self): 237 | return next(self.__iter__()) 238 | 239 | 240 | def sorted(iterable, cmp=None, key=None, reverse=False): 241 | return iterable 242 | 243 | 244 | #-------------------------------------------------------- 245 | # basic types 246 | #-------------------------------------------------------- 247 | class int(): 248 | def __init__(self, x, base=None): 249 | pass 250 | 251 | 252 | class str(): 253 | def __init__(self, obj): 254 | pass 255 | 256 | 257 | class type(): 258 | def mro(): 259 | return [object] 260 | -------------------------------------------------------------------------------- /lib/jedi/_compatibility.py: -------------------------------------------------------------------------------- 1 | """ 2 | To ensure compatibility from Python ``2.6`` - ``3.3``, a module has been 3 | created. Clearly there is huge need to use conforming syntax. 4 | """ 5 | import sys 6 | import imp 7 | import os 8 | import re 9 | try: 10 | import importlib 11 | except ImportError: 12 | pass 13 | 14 | is_py3 = sys.version_info[0] >= 3 15 | is_py33 = is_py3 and sys.version_info.minor >= 3 16 | is_py26 = not is_py3 and sys.version_info[1] < 7 17 | 18 | 19 | def find_module_py33(string, path=None): 20 | loader = importlib.machinery.PathFinder.find_module(string, path) 21 | 22 | if loader is None and path is None: # Fallback to find builtins 23 | try: 24 | loader = importlib.find_loader(string) 25 | except ValueError as e: 26 | # See #491. Importlib might raise a ValueError, to avoid this, we 27 | # just raise an ImportError to fix the issue. 
28 | raise ImportError("Originally ValueError: " + e.message) 29 | 30 | if loader is None: 31 | raise ImportError("Couldn't find a loader for {0}".format(string)) 32 | 33 | try: 34 | is_package = loader.is_package(string) 35 | if is_package: 36 | module_path = os.path.dirname(loader.path) 37 | module_file = None 38 | else: 39 | module_path = loader.get_filename(string) 40 | module_file = open(module_path, 'rb') 41 | except AttributeError: 42 | # ExtensionLoader has not attribute get_filename, instead it has a 43 | # path attribute that we can use to retrieve the module path 44 | try: 45 | module_path = loader.path 46 | module_file = open(loader.path, 'rb') 47 | except AttributeError: 48 | module_path = string 49 | module_file = None 50 | finally: 51 | is_package = False 52 | 53 | return module_file, module_path, is_package 54 | 55 | 56 | def find_module_pre_py33(string, path=None): 57 | module_file, module_path, description = imp.find_module(string, path) 58 | module_type = description[2] 59 | return module_file, module_path, module_type is imp.PKG_DIRECTORY 60 | 61 | 62 | find_module = find_module_py33 if is_py33 else find_module_pre_py33 63 | find_module.__doc__ = """ 64 | Provides information about a module. 65 | 66 | This function isolates the differences in importing libraries introduced with 67 | python 3.3 on; it gets a module name and optionally a path. It will return a 68 | tuple containin an open file for the module (if not builtin), the filename 69 | or the name of the module if it is a builtin one and a boolean indicating 70 | if the module is contained in a package. 71 | """ 72 | 73 | 74 | # unicode function 75 | try: 76 | unicode = unicode 77 | except NameError: 78 | unicode = str 79 | 80 | if is_py3: 81 | u = lambda s: s 82 | else: 83 | u = lambda s: s.decode('utf-8') 84 | 85 | u.__doc__ = """ 86 | Decode a raw string into unicode object. Do nothing in Python 3. 
87 | """ 88 | 89 | # exec function 90 | if is_py3: 91 | def exec_function(source, global_map): 92 | exec(source, global_map) 93 | else: 94 | eval(compile("""def exec_function(source, global_map): 95 | exec source in global_map """, 'blub', 'exec')) 96 | 97 | # re-raise function 98 | if is_py3: 99 | def reraise(exception, traceback): 100 | raise exception.with_traceback(traceback) 101 | else: 102 | eval(compile(""" 103 | def reraise(exception, traceback): 104 | raise exception, None, traceback 105 | """, 'blub', 'exec')) 106 | 107 | reraise.__doc__ = """ 108 | Re-raise `exception` with a `traceback` object. 109 | 110 | Usage:: 111 | 112 | reraise(Exception, sys.exc_info()[2]) 113 | 114 | """ 115 | 116 | class Python3Method(object): 117 | def __init__(self, func): 118 | self.func = func 119 | 120 | def __get__(self, obj, objtype): 121 | if obj is None: 122 | return lambda *args, **kwargs: self.func(*args, **kwargs) 123 | else: 124 | return lambda *args, **kwargs: self.func(obj, *args, **kwargs) 125 | 126 | 127 | def use_metaclass(meta, *bases): 128 | """ Create a class with a metaclass. """ 129 | if not bases: 130 | bases = (object,) 131 | return meta("HackClass", bases, {}) 132 | 133 | 134 | try: 135 | encoding = sys.stdout.encoding 136 | if encoding is None: 137 | encoding = 'utf-8' 138 | except AttributeError: 139 | encoding = 'ascii' 140 | 141 | 142 | def u(string): 143 | """Cast to unicode DAMMIT! 144 | Written because Python2 repr always implicitly casts to a string, so we 145 | have to cast back to a unicode (and we now that we always deal with valid 146 | unicode, because we check that in the beginning). 
147 | """ 148 | if is_py3: 149 | return str(string) 150 | elif not isinstance(string, unicode): 151 | return unicode(str(string), 'UTF-8') 152 | return string 153 | 154 | try: 155 | import builtins # module name in python 3 156 | except ImportError: 157 | import __builtin__ as builtins 158 | 159 | 160 | import ast 161 | 162 | 163 | def literal_eval(string): 164 | # py3.0, py3.1 and py32 don't support unicode literals. Support those, I 165 | # don't want to write two versions of the tokenizer. 166 | if is_py3 and sys.version_info.minor < 3: 167 | if re.match('[uU][\'"]', string): 168 | string = string[1:] 169 | return ast.literal_eval(string) 170 | 171 | 172 | try: 173 | from itertools import zip_longest 174 | except ImportError: 175 | from itertools import izip_longest as zip_longest # Python 2 176 | 177 | 178 | def no_unicode_pprint(dct): 179 | """ 180 | Python 2/3 dict __repr__ may be different, because of unicode differens 181 | (with or without a `u` prefix). Normally in doctests we could use `pprint` 182 | to sort dicts and check for equality, but here we have to write a separate 183 | function to do that. 184 | """ 185 | import pprint 186 | s = pprint.pformat(dct) 187 | print(re.sub("u'", "'", s)) 188 | 189 | 190 | def utf8_repr(func): 191 | """ 192 | ``__repr__`` methods in Python 2 don't allow unicode objects to be 193 | returned. Therefore cast them to utf-8 bytes in this decorator. 
194 | """ 195 | def wrapper(self): 196 | result = func(self) 197 | if isinstance(result, unicode): 198 | return result.encode('utf-8') 199 | else: 200 | return result 201 | 202 | if is_py3: 203 | return func 204 | else: 205 | return wrapper 206 | -------------------------------------------------------------------------------- /lib/jedi/parser/grammar3.4.txt: -------------------------------------------------------------------------------- 1 | # Grammar for Python 2 | 3 | # Note: Changing the grammar specified in this file will most likely 4 | # require corresponding changes in the parser module 5 | # (../Modules/parsermodule.c). If you can't make the changes to 6 | # that module yourself, please co-ordinate the required changes 7 | # with someone who can; ask around on python-dev for help. Fred 8 | # Drake will probably be listening there. 9 | 10 | # NOTE WELL: You should also follow all the steps listed in PEP 306, 11 | # "How to Change Python's Grammar" 12 | 13 | # Start symbols for the grammar: 14 | # single_input is a single interactive statement; 15 | # file_input is a module or sequence of commands read from an input file; 16 | # eval_input is the input for the eval() functions. 17 | # NB: compound_stmt in single_input is followed by extra NEWLINE! 
18 | file_input: (NEWLINE | stmt)* ENDMARKER 19 | single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE 20 | eval_input: testlist NEWLINE* ENDMARKER 21 | 22 | decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE 23 | decorators: decorator+ 24 | decorated: decorators (classdef | funcdef) 25 | funcdef: 'def' NAME parameters ['->' test] ':' suite 26 | parameters: '(' [typedargslist] ')' 27 | typedargslist: (tfpdef ['=' test] (',' tfpdef ['=' test])* [',' 28 | ['*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef]] 29 | | '*' [tfpdef] (',' tfpdef ['=' test])* [',' '**' tfpdef] | '**' tfpdef) 30 | tfpdef: NAME [':' test] 31 | varargslist: (vfpdef ['=' test] (',' vfpdef ['=' test])* [',' 32 | ['*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef]] 33 | | '*' [vfpdef] (',' vfpdef ['=' test])* [',' '**' vfpdef] | '**' vfpdef) 34 | vfpdef: NAME 35 | 36 | stmt: simple_stmt | compound_stmt 37 | simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE 38 | small_stmt: (expr_stmt | del_stmt | pass_stmt | flow_stmt | 39 | import_stmt | global_stmt | nonlocal_stmt | assert_stmt) 40 | expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) | 41 | ('=' (yield_expr|testlist_star_expr))*) 42 | testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] 43 | augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | 44 | '<<=' | '>>=' | '**=' | '//=') 45 | # For normal assignments, additional restrictions enforced by the interpreter 46 | del_stmt: 'del' exprlist 47 | pass_stmt: 'pass' 48 | flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt 49 | break_stmt: 'break' 50 | continue_stmt: 'continue' 51 | return_stmt: 'return' [testlist] 52 | yield_stmt: yield_expr 53 | raise_stmt: 'raise' [test ['from' test]] 54 | import_stmt: import_name | import_from 55 | import_name: 'import' dotted_as_names 56 | # note below: the ('.' | '...') is necessary because '...' 
is tokenized as ELLIPSIS 57 | import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) 58 | 'import' ('*' | '(' import_as_names ')' | import_as_names)) 59 | import_as_name: NAME ['as' NAME] 60 | dotted_as_name: dotted_name ['as' NAME] 61 | import_as_names: import_as_name (',' import_as_name)* [','] 62 | dotted_as_names: dotted_as_name (',' dotted_as_name)* 63 | dotted_name: NAME ('.' NAME)* 64 | global_stmt: 'global' NAME (',' NAME)* 65 | nonlocal_stmt: 'nonlocal' NAME (',' NAME)* 66 | assert_stmt: 'assert' test [',' test] 67 | 68 | compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated 69 | if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] 70 | while_stmt: 'while' test ':' suite ['else' ':' suite] 71 | for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] 72 | try_stmt: ('try' ':' suite 73 | ((except_clause ':' suite)+ 74 | ['else' ':' suite] 75 | ['finally' ':' suite] | 76 | 'finally' ':' suite)) 77 | with_stmt: 'with' with_item (',' with_item)* ':' suite 78 | with_item: test ['as' expr] 79 | # NB compile.c makes sure that the default except clause is last 80 | except_clause: 'except' [test ['as' NAME]] 81 | # Edit by David Halter: The stmt is now optional. This reflects how Jedi allows 82 | # classes and functions to be empty, which is beneficial for autocompletion. 83 | suite: simple_stmt | NEWLINE INDENT stmt* DEDENT 84 | 85 | test: or_test ['if' or_test 'else' test] | lambdef 86 | test_nocond: or_test | lambdef_nocond 87 | lambdef: 'lambda' [varargslist] ':' test 88 | lambdef_nocond: 'lambda' [varargslist] ':' test_nocond 89 | or_test: and_test ('or' and_test)* 90 | and_test: not_test ('and' not_test)* 91 | not_test: 'not' not_test | comparison 92 | comparison: expr (comp_op expr)* 93 | # <> isn't actually a valid comparison operator in Python. 
It's here for the 94 | # sake of a __future__ import described in PEP 401 95 | comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' 96 | star_expr: '*' expr 97 | expr: xor_expr ('|' xor_expr)* 98 | xor_expr: and_expr ('^' and_expr)* 99 | and_expr: shift_expr ('&' shift_expr)* 100 | shift_expr: arith_expr (('<<'|'>>') arith_expr)* 101 | arith_expr: term (('+'|'-') term)* 102 | term: factor (('*'|'/'|'%'|'//') factor)* 103 | factor: ('+'|'-'|'~') factor | power 104 | power: atom trailer* ['**' factor] 105 | atom: ('(' [yield_expr|testlist_comp] ')' | 106 | '[' [testlist_comp] ']' | 107 | '{' [dictorsetmaker] '}' | 108 | NAME | NUMBER | STRING+ | '...' | 'None' | 'True' | 'False') 109 | testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) 110 | trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME 111 | subscriptlist: subscript (',' subscript)* [','] 112 | subscript: test | [test] ':' [test] [sliceop] 113 | sliceop: ':' [test] 114 | exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] 115 | testlist: test (',' test)* [','] 116 | dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) | 117 | (test (comp_for | (',' test)* [','])) ) 118 | 119 | classdef: 'class' NAME ['(' [arglist] ')'] ':' suite 120 | 121 | arglist: (argument ',')* (argument [','] 122 | |'*' test (',' argument)* [',' '**' test] 123 | |'**' test) 124 | # The reason that keywords are test nodes instead of NAME is that using NAME 125 | # results in an ambiguity. ast.c makes sure it's a NAME. 
126 | argument: test [comp_for] | test '=' test # Really [keyword '='] test 127 | comp_iter: comp_for | comp_if 128 | comp_for: 'for' exprlist 'in' or_test [comp_iter] 129 | comp_if: 'if' test_nocond [comp_iter] 130 | 131 | # not used in grammar, but may appear in "node" passed from Parser to Compiler 132 | encoding_decl: NAME 133 | 134 | yield_expr: 'yield' [yield_arg] 135 | yield_arg: 'from' test | testlist 136 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/precedence.py: -------------------------------------------------------------------------------- 1 | """ 2 | Handles operator precedence. 3 | """ 4 | import operator 5 | 6 | from jedi._compatibility import unicode 7 | from jedi.parser import tree 8 | from jedi import debug 9 | from jedi.evaluate.compiled import (CompiledObject, create, builtin, 10 | keyword_from_value, true_obj, false_obj) 11 | from jedi.evaluate import analysis 12 | 13 | # Maps Python syntax to the operator module. 14 | COMPARISON_OPERATORS = { 15 | '==': operator.eq, 16 | '!=': operator.ne, 17 | 'is': operator.is_, 18 | 'is not': operator.is_not, 19 | '<': operator.lt, 20 | '<=': operator.le, 21 | '>': operator.gt, 22 | '>=': operator.ge, 23 | } 24 | 25 | 26 | def _literals_to_types(evaluator, result): 27 | # Changes literals ('a', 1, 1.0, etc) to its type instances (str(), 28 | # int(), float(), etc). 29 | for i, r in enumerate(result): 30 | if is_literal(r): 31 | # Literals are only valid as long as the operations are 32 | # correct. Otherwise add a value-free instance. 33 | cls = builtin.get_by_name(r.name.get_code()) 34 | result[i] = evaluator.execute(cls)[0] 35 | return list(set(result)) 36 | 37 | 38 | def calculate_children(evaluator, children): 39 | """ 40 | Calculate a list of children with operators. 
41 | """ 42 | iterator = iter(children) 43 | types = evaluator.eval_element(next(iterator)) 44 | for operator in iterator: 45 | right = next(iterator) 46 | if tree.is_node(operator, 'comp_op'): # not in / is not 47 | operator = ' '.join(str(c.value) for c in operator.children) 48 | 49 | # handle lazy evaluation of and/or here. 50 | if operator in ('and', 'or'): 51 | left_bools = set([left.py__bool__() for left in types]) 52 | if left_bools == set([True]): 53 | if operator == 'and': 54 | types = evaluator.eval_element(right) 55 | elif left_bools == set([False]): 56 | if operator != 'and': 57 | types = evaluator.eval_element(right) 58 | # Otherwise continue, because of uncertainty. 59 | else: 60 | types = calculate(evaluator, types, operator, 61 | evaluator.eval_element(right)) 62 | debug.dbg('calculate_children types %s', types) 63 | return types 64 | 65 | 66 | def calculate(evaluator, left_result, operator, right_result): 67 | result = [] 68 | if not left_result or not right_result: 69 | # illegal slices e.g. cause left/right_result to be None 70 | result = (left_result or []) + (right_result or []) 71 | result = _literals_to_types(evaluator, result) 72 | else: 73 | # I don't think there's a reasonable chance that a string 74 | # operation is still correct, once we pass something like six 75 | # objects. 76 | if len(left_result) * len(right_result) > 6: 77 | result = _literals_to_types(evaluator, left_result + right_result) 78 | else: 79 | for left in left_result: 80 | for right in right_result: 81 | result += _element_calculate(evaluator, left, operator, right) 82 | return result 83 | 84 | 85 | def factor_calculate(evaluator, types, operator): 86 | """ 87 | Calculates `+`, `-`, `~` and `not` prefixes. 88 | """ 89 | for typ in types: 90 | if operator == '-': 91 | if _is_number(typ): 92 | yield create(evaluator, -typ.obj) 93 | elif operator == 'not': 94 | value = typ.py__bool__() 95 | if value is None: # Uncertainty. 
96 | return 97 | yield keyword_from_value(not value) 98 | else: 99 | yield typ 100 | 101 | 102 | def _is_number(obj): 103 | return isinstance(obj, CompiledObject) \ 104 | and isinstance(obj.obj, (int, float)) 105 | 106 | 107 | def is_string(obj): 108 | return isinstance(obj, CompiledObject) \ 109 | and isinstance(obj.obj, (str, unicode)) 110 | 111 | 112 | def is_literal(obj): 113 | return _is_number(obj) or is_string(obj) 114 | 115 | 116 | def _is_tuple(obj): 117 | from jedi.evaluate import iterable 118 | return isinstance(obj, iterable.Array) and obj.type == 'tuple' 119 | 120 | 121 | def _is_list(obj): 122 | from jedi.evaluate import iterable 123 | return isinstance(obj, iterable.Array) and obj.type == 'list' 124 | 125 | 126 | def _element_calculate(evaluator, left, operator, right): 127 | from jedi.evaluate import iterable, representation as er 128 | l_is_num = _is_number(left) 129 | r_is_num = _is_number(right) 130 | if operator == '*': 131 | # for iterables, ignore * operations 132 | if isinstance(left, iterable.Array) or is_string(left): 133 | return [left] 134 | elif isinstance(right, iterable.Array) or is_string(right): 135 | return [right] 136 | elif operator == '+': 137 | if l_is_num and r_is_num or is_string(left) and is_string(right): 138 | return [create(evaluator, left.obj + right.obj)] 139 | elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right): 140 | return [iterable.MergedArray(evaluator, (left, right))] 141 | elif operator == '-': 142 | if l_is_num and r_is_num: 143 | return [create(evaluator, left.obj - right.obj)] 144 | elif operator == '%': 145 | # With strings and numbers the left type typically remains. Except for 146 | # `int() % float()`. 147 | return [left] 148 | elif operator in COMPARISON_OPERATORS: 149 | operation = COMPARISON_OPERATORS[operator] 150 | if isinstance(left, CompiledObject) and isinstance(right, CompiledObject): 151 | # Possible, because the return is not an option. Just compare. 
152 | left = left.obj 153 | right = right.obj 154 | 155 | try: 156 | return [keyword_from_value(operation(left, right))] 157 | except TypeError: 158 | # Could be True or False. 159 | return [true_obj, false_obj] 160 | elif operator == 'in': 161 | return [] 162 | 163 | def check(obj): 164 | """Checks if a Jedi object is either a float or an int.""" 165 | return isinstance(obj, er.Instance) and obj.name.get_code() in ('int', 'float') 166 | 167 | # Static analysis, one is a number, the other one is not. 168 | if operator in ('+', '-') and l_is_num != r_is_num \ 169 | and not (check(left) or check(right)): 170 | message = "TypeError: unsupported operand type(s) for +: %s and %s" 171 | analysis.add(evaluator, 'type-error-operation', operator, 172 | message % (left, right)) 173 | 174 | return [left, right] 175 | -------------------------------------------------------------------------------- /lib/jedi/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | This module contains variables with global |jedi| settings. To change the 3 | behavior of |jedi|, change the variables defined in :mod:`jedi.settings`. 4 | 5 | Plugins should expose an interface so that the user can adjust the 6 | configuration. 7 | 8 | 9 | Example usage:: 10 | 11 | from jedi import settings 12 | settings.case_insensitive_completion = True 13 | 14 | 15 | Completion output 16 | ~~~~~~~~~~~~~~~~~ 17 | 18 | .. autodata:: case_insensitive_completion 19 | .. autodata:: add_dot_after_module 20 | .. autodata:: add_bracket_after_function 21 | .. autodata:: no_completion_duplicates 22 | 23 | 24 | Filesystem cache 25 | ~~~~~~~~~~~~~~~~ 26 | 27 | .. autodata:: cache_directory 28 | .. autodata:: use_filesystem_cache 29 | 30 | 31 | Parser 32 | ~~~~~~ 33 | 34 | .. autodata:: fast_parser 35 | 36 | 37 | Dynamic stuff 38 | ~~~~~~~~~~~~~ 39 | 40 | .. autodata:: dynamic_array_additions 41 | .. autodata:: dynamic_params 42 | .. 
autodata:: dynamic_params_for_other_modules 43 | .. autodata:: additional_dynamic_modules 44 | .. autodata:: auto_import_modules 45 | 46 | 47 | .. _settings-recursion: 48 | 49 | Recursions 50 | ~~~~~~~~~~ 51 | 52 | Recursion settings are important if you don't want extremely 53 | recursive python code to go absolutely crazy. First off, there is a 54 | global limit :data:`max_executions`. This limit is important, to set 55 | a maximum amount of time, the completion may use. 56 | 57 | The default values are based on experiments while completing the |jedi| library 58 | itself (inception!). But I don't think there's any other Python library that 59 | uses recursion in a similarly extreme way. These settings make the completion 60 | definitely worse in some cases. But a completion should also be fast. 61 | 62 | .. autodata:: max_until_execution_unique 63 | .. autodata:: max_function_recursion_level 64 | .. autodata:: max_executions_without_builtins 65 | .. autodata:: max_executions 66 | .. autodata:: scale_call_signatures 67 | 68 | 69 | Caching 70 | ~~~~~~~ 71 | 72 | .. autodata:: star_import_cache_validity 73 | .. autodata:: call_signatures_validity 74 | 75 | 76 | """ 77 | import os 78 | import platform 79 | 80 | # ---------------- 81 | # completion output settings 82 | # ---------------- 83 | 84 | case_insensitive_completion = True 85 | """ 86 | The completion is by default case insensitive. 87 | """ 88 | 89 | add_dot_after_module = False 90 | """ 91 | Adds a dot after a module, because a module that is not accessed this way is 92 | definitely not the normal case. However, in VIM this doesn't work, that's why 93 | it isn't used at the moment. 94 | """ 95 | 96 | add_bracket_after_function = False 97 | """ 98 | Adds an opening bracket after a function, because that's normal behaviour. 99 | Removed it again, because in VIM that is not very practical.
100 | """ 101 | 102 | no_completion_duplicates = True 103 | """ 104 | If set, completions with the same name don't appear in the output anymore, 105 | but are in the `same_name_completions` attribute. 106 | """ 107 | 108 | # ---------------- 109 | # Filesystem cache 110 | # ---------------- 111 | 112 | use_filesystem_cache = True 113 | """ 114 | Use filesystem cache to save once parsed files with pickle. 115 | """ 116 | 117 | if platform.system().lower() == 'windows': 118 | _cache_directory = os.path.join(os.getenv('APPDATA') or '~', 'Jedi', 119 | 'Jedi') 120 | elif platform.system().lower() == 'darwin': 121 | _cache_directory = os.path.join('~', 'Library', 'Caches', 'Jedi') 122 | else: 123 | _cache_directory = os.path.join(os.getenv('XDG_CACHE_HOME') or '~/.cache', 124 | 'jedi') 125 | cache_directory = os.path.expanduser(_cache_directory) 126 | """ 127 | The path where all the caches can be found. 128 | 129 | On Linux, this defaults to ``~/.cache/jedi/``, on OS X to 130 | ``~/Library/Caches/Jedi/`` and on Windows to ``%APPDATA%\\Jedi\\Jedi\\``. 131 | On Linux, if environment variable ``$XDG_CACHE_HOME`` is set, 132 | ``$XDG_CACHE_HOME/jedi`` is used instead of the default one. 133 | """ 134 | 135 | # ---------------- 136 | # parser 137 | # ---------------- 138 | 139 | fast_parser = True 140 | """ 141 | Use the fast parser. This means that reparsing is only being done if 142 | something has been changed e.g. to a function. If this happens, only the 143 | function is being reparsed. 144 | """ 145 | 146 | # ---------------- 147 | # dynamic stuff 148 | # ---------------- 149 | 150 | dynamic_array_additions = True 151 | """ 152 | check for `append`, etc. on arrays: [], {}, () as well as list/set calls. 153 | """ 154 | 155 | dynamic_params = True 156 | """ 157 | A dynamic param completion, finds the callees of the function, which define 158 | the params of a function. 
159 | """ 160 | 161 | dynamic_params_for_other_modules = True 162 | """ 163 | Do the same for other modules. 164 | """ 165 | 166 | additional_dynamic_modules = [] 167 | """ 168 | Additional modules in which |jedi| checks if statements are to be found. This 169 | is practical for IDEs, that want to administrate their modules themselves. 170 | """ 171 | 172 | dynamic_flow_information = True 173 | """ 174 | Check for `isinstance` and other information to infer a type. 175 | """ 176 | 177 | auto_import_modules = [ 178 | 'hashlib', # setattr 179 | ] 180 | """ 181 | Modules that are not analyzed but imported, although they contain Python code. 182 | This improves autocompletion for libraries that use ``setattr`` or 183 | ``globals()`` modifications a lot. 184 | """ 185 | 186 | # ---------------- 187 | # recursions 188 | # ---------------- 189 | 190 | max_until_execution_unique = 50 191 | """ 192 | This limit is probably the most important one, because if this limit is 193 | exceeded, functions can only be one time executed. So new functions will be 194 | executed, complex recursions with the same functions again and again, are 195 | ignored. 196 | """ 197 | 198 | max_function_recursion_level = 5 199 | """ 200 | `max_function_recursion_level` is more about whether the recursions are 201 | stopped in depth or in width. The ratio between this and 202 | `max_until_execution_unique` is important here. It stops a recursion (after 203 | the number of function calls in the recursion), if it was already used 204 | earlier. 205 | """ 206 | 207 | max_executions_without_builtins = 200 208 | """ 209 | .. todo:: Document this. 210 | """ 211 | 212 | max_executions = 250 213 | """ 214 | A maximum amount of time, the completion may use. 215 | """ 216 | 217 | scale_call_signatures = 0.1 218 | """ 219 | Because call_signatures is normally used on every single key hit, it has 220 | to be faster than a normal completion.
This is the factor that is used to 221 | scale `max_executions` and `max_until_execution_unique`: 222 | """ 223 | 224 | # ---------------- 225 | # caching validity (time) 226 | # ---------------- 227 | 228 | star_import_cache_validity = 60.0 229 | """ 230 | In huge packages like numpy, checking all star imports on every completion 231 | might be slow, therefore we do a star import caching, that lasts a certain 232 | time span (in seconds). 233 | """ 234 | 235 | call_signatures_validity = 3.0 236 | """ 237 | Finding function calls might be slow (0.1-0.5s). This is not acceptable for 238 | normal writing. Therefore cache it for a short time. 239 | """ 240 | -------------------------------------------------------------------------------- /lib/jedi/parser/grammar2.7.txt: -------------------------------------------------------------------------------- 1 | # Grammar for 2to3. This grammar supports Python 2.x and 3.x. 2 | 3 | # Note: Changing the grammar specified in this file will most likely 4 | # require corresponding changes in the parser module 5 | # (../Modules/parsermodule.c). If you can't make the changes to 6 | # that module yourself, please co-ordinate the required changes 7 | # with someone who can; ask around on python-dev for help. Fred 8 | # Drake will probably be listening there. 9 | 10 | # NOTE WELL: You should also follow all the steps listed in PEP 306, 11 | # "How to Change Python's Grammar" 12 | 13 | 14 | # Start symbols for the grammar: 15 | # file_input is a module or sequence of commands read from an input file; 16 | # single_input is a single interactive statement; 17 | # eval_input is the input for the eval() and input() functions. 18 | # NB: compound_stmt in single_input is followed by extra NEWLINE!
19 | file_input: (NEWLINE | stmt)* ENDMARKER 20 | single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE 21 | eval_input: testlist NEWLINE* ENDMARKER 22 | 23 | decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE 24 | decorators: decorator+ 25 | decorated: decorators (classdef | funcdef) 26 | funcdef: 'def' NAME parameters ['->' test] ':' suite 27 | parameters: '(' [typedargslist] ')' 28 | typedargslist: ((tfpdef ['=' test] ',')* 29 | ('*' [tname] (',' tname ['=' test])* [',' '**' tname] | '**' tname) 30 | | tfpdef ['=' test] (',' tfpdef ['=' test])* [',']) 31 | tname: NAME [':' test] 32 | tfpdef: tname | '(' tfplist ')' 33 | tfplist: tfpdef (',' tfpdef)* [','] 34 | varargslist: ((vfpdef ['=' test] ',')* 35 | ('*' [vname] (',' vname ['=' test])* [',' '**' vname] | '**' vname) 36 | | vfpdef ['=' test] (',' vfpdef ['=' test])* [',']) 37 | vname: NAME 38 | vfpdef: vname | '(' vfplist ')' 39 | vfplist: vfpdef (',' vfpdef)* [','] 40 | 41 | stmt: simple_stmt | compound_stmt 42 | simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE 43 | small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt | 44 | import_stmt | global_stmt | exec_stmt | assert_stmt) 45 | expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) | 46 | ('=' (yield_expr|testlist_star_expr))*) 47 | testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [','] 48 | augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | 49 | '<<=' | '>>=' | '**=' | '//=') 50 | # For normal assignments, additional restrictions enforced by the interpreter 51 | print_stmt: 'print' ( [ test (',' test)* [','] ] | 52 | '>>' test [ (',' test)+ [','] ] ) 53 | del_stmt: 'del' exprlist 54 | pass_stmt: 'pass' 55 | flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt 56 | break_stmt: 'break' 57 | continue_stmt: 'continue' 58 | return_stmt: 'return' [testlist] 59 | yield_stmt: yield_expr 60 | raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]] 61 | 
import_stmt: import_name | import_from 62 | import_name: 'import' dotted_as_names 63 | # note below: the ('.' | '...') is necessary because '...' is tokenized as ELLIPSIS 64 | import_from: ('from' (('.' | '...')* dotted_name | ('.' | '...')+) 65 | 'import' ('*' | '(' import_as_names ')' | import_as_names)) 66 | import_as_name: NAME ['as' NAME] 67 | dotted_as_name: dotted_name ['as' NAME] 68 | import_as_names: import_as_name (',' import_as_name)* [','] 69 | dotted_as_names: dotted_as_name (',' dotted_as_name)* 70 | dotted_name: NAME ('.' NAME)* 71 | global_stmt: ('global' | 'nonlocal') NAME (',' NAME)* 72 | exec_stmt: 'exec' expr ['in' test [',' test]] 73 | assert_stmt: 'assert' test [',' test] 74 | 75 | compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated 76 | if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite] 77 | while_stmt: 'while' test ':' suite ['else' ':' suite] 78 | for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite] 79 | try_stmt: ('try' ':' suite 80 | ((except_clause ':' suite)+ 81 | ['else' ':' suite] 82 | ['finally' ':' suite] | 83 | 'finally' ':' suite)) 84 | with_stmt: 'with' with_item (',' with_item)* ':' suite 85 | with_item: test ['as' expr] 86 | with_var: 'as' expr 87 | # NB compile.c makes sure that the default except clause is last 88 | except_clause: 'except' [test [(',' | 'as') test]] 89 | # Edit by David Halter: The stmt is now optional. This reflects how Jedi allows 90 | # classes and functions to be empty, which is beneficial for autocompletion. 
91 | suite: simple_stmt | NEWLINE INDENT stmt* DEDENT 92 | 93 | # Backward compatibility cruft to support: 94 | # [ x for x in lambda: True, lambda: False if x() ] 95 | # even while also allowing: 96 | # lambda x: 5 if x else 2 97 | # (But not a mix of the two) 98 | testlist_safe: old_test [(',' old_test)+ [',']] 99 | old_test: or_test | old_lambdef 100 | old_lambdef: 'lambda' [varargslist] ':' old_test 101 | 102 | test: or_test ['if' or_test 'else' test] | lambdef 103 | or_test: and_test ('or' and_test)* 104 | and_test: not_test ('and' not_test)* 105 | not_test: 'not' not_test | comparison 106 | comparison: expr (comp_op expr)* 107 | comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' 108 | star_expr: '*' expr 109 | expr: xor_expr ('|' xor_expr)* 110 | xor_expr: and_expr ('^' and_expr)* 111 | and_expr: shift_expr ('&' shift_expr)* 112 | shift_expr: arith_expr (('<<'|'>>') arith_expr)* 113 | arith_expr: term (('+'|'-') term)* 114 | term: factor (('*'|'/'|'%'|'//') factor)* 115 | factor: ('+'|'-'|'~') factor | power 116 | power: atom trailer* ['**' factor] 117 | atom: ('(' [yield_expr|testlist_comp] ')' | 118 | '[' [testlist_comp] ']' | 119 | '{' [dictorsetmaker] '}' | 120 | '`' testlist1 '`' | 121 | NAME | NUMBER | STRING+ | '.' '.' '.') 122 | # Modification by David Halter, remove `testlist_gexp` and `listmaker` 123 | testlist_comp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] ) 124 | lambdef: 'lambda' [varargslist] ':' test 125 | trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME 126 | subscriptlist: subscript (',' subscript)* [','] 127 | subscript: test | [test] ':' [test] [sliceop] 128 | sliceop: ':' [test] 129 | exprlist: (expr|star_expr) (',' (expr|star_expr))* [','] 130 | testlist: test (',' test)* [','] 131 | # Modification by David Halter, dictsetmaker -> dictorsetmaker (so that it's 132 | # the same as in the 3.4 grammar). 
133 | dictorsetmaker: ( (test ':' test (comp_for | (',' test ':' test)* [','])) | 134 | (test (comp_for | (',' test)* [','])) ) 135 | 136 | classdef: 'class' NAME ['(' [arglist] ')'] ':' suite 137 | 138 | arglist: (argument ',')* (argument [','] 139 | |'*' test (',' argument)* [',' '**' test] 140 | |'**' test) 141 | argument: test [comp_for] | test '=' test # Really [keyword '='] test 142 | 143 | comp_iter: comp_for | comp_if 144 | comp_for: 'for' exprlist 'in' testlist_safe [comp_iter] 145 | comp_if: 'if' old_test [comp_iter] 146 | 147 | testlist1: test (',' test)* 148 | 149 | # not used in grammar, but may appear in "node" passed from Parser to Compiler 150 | encoding_decl: NAME 151 | 152 | yield_expr: 'yield' [testlist] 153 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/docstrings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Docstrings are another source of information for functions and classes. 3 | :mod:`jedi.evaluate.dynamic` tries to find all executions of functions, while 4 | the docstring parsing is much easier. There are two different types of 5 | docstrings that |jedi| understands: 6 | 7 | - `Sphinx `_ 8 | - `Epydoc `_ 9 | 10 | For example, the sphinx annotation ``:type foo: str`` clearly states that the 11 | type of ``foo`` is ``str``. 12 | 13 | As an addition to parameter searching, this module also provides return 14 | annotations. 
15 | """ 16 | 17 | from ast import literal_eval 18 | import re 19 | from itertools import chain 20 | from textwrap import dedent 21 | 22 | from jedi.evaluate.cache import memoize_default 23 | from jedi.parser import Parser, load_grammar 24 | from jedi.common import indent_block 25 | from jedi.evaluate.iterable import Array, FakeSequence, AlreadyEvaluated 26 | 27 | 28 | DOCSTRING_PARAM_PATTERNS = [ 29 | r'\s*:type\s+%s:\s*([^\n]+)', # Sphinx 30 | r'\s*:param\s+(\w+)\s+%s:[^\n]+', # Sphinx param with type 31 | r'\s*@type\s+%s:\s*([^\n]+)', # Epydoc 32 | ] 33 | 34 | DOCSTRING_RETURN_PATTERNS = [ 35 | re.compile(r'\s*:rtype:\s*([^\n]+)', re.M), # Sphinx 36 | re.compile(r'\s*@rtype:\s*([^\n]+)', re.M), # Epydoc 37 | ] 38 | 39 | REST_ROLE_PATTERN = re.compile(r':[^`]+:`([^`]+)`') 40 | 41 | 42 | try: 43 | from numpydoc.docscrape import NumpyDocString 44 | except ImportError: 45 | def _search_param_in_numpydocstr(docstr, param_str): 46 | return [] 47 | else: 48 | def _search_param_in_numpydocstr(docstr, param_str): 49 | """Search `docstr` (in numpydoc format) for type(-s) of `param_str`.""" 50 | params = NumpyDocString(docstr)._parsed_data['Parameters'] 51 | for p_name, p_type, p_descr in params: 52 | if p_name == param_str: 53 | m = re.match('([^,]+(,[^,]+)*?)(,[ ]*optional)?$', p_type) 54 | if m: 55 | p_type = m.group(1) 56 | 57 | if p_type.startswith('{'): 58 | types = set(type(x).__name__ for x in literal_eval(p_type)) 59 | return list(types) 60 | else: 61 | return [p_type] 62 | return [] 63 | 64 | 65 | def _search_param_in_docstr(docstr, param_str): 66 | """ 67 | Search `docstr` for type(-s) of `param_str`. 68 | 69 | >>> _search_param_in_docstr(':type param: int', 'param') 70 | ['int'] 71 | >>> _search_param_in_docstr('@type param: int', 'param') 72 | ['int'] 73 | >>> _search_param_in_docstr( 74 | ... 
':type param: :class:`threading.Thread`', 'param') 75 | ['threading.Thread'] 76 | >>> bool(_search_param_in_docstr('no document', 'param')) 77 | False 78 | >>> _search_param_in_docstr(':param int param: some description', 'param') 79 | ['int'] 80 | 81 | """ 82 | # look at #40 to see definitions of those params 83 | patterns = [re.compile(p % re.escape(param_str)) 84 | for p in DOCSTRING_PARAM_PATTERNS] 85 | for pattern in patterns: 86 | match = pattern.search(docstr) 87 | if match: 88 | return [_strip_rst_role(match.group(1))] 89 | 90 | return (_search_param_in_numpydocstr(docstr, param_str) or 91 | []) 92 | 93 | 94 | def _strip_rst_role(type_str): 95 | """ 96 | Strip off the part looks like a ReST role in `type_str`. 97 | 98 | >>> _strip_rst_role(':class:`ClassName`') # strip off :class: 99 | 'ClassName' 100 | >>> _strip_rst_role(':py:obj:`module.Object`') # works with domain 101 | 'module.Object' 102 | >>> _strip_rst_role('ClassName') # do nothing when not ReST role 103 | 'ClassName' 104 | 105 | See also: 106 | http://sphinx-doc.org/domains.html#cross-referencing-python-objects 107 | 108 | """ 109 | match = REST_ROLE_PATTERN.match(type_str) 110 | if match: 111 | return match.group(1) 112 | else: 113 | return type_str 114 | 115 | 116 | def _evaluate_for_statement_string(evaluator, string, module): 117 | code = dedent(""" 118 | def pseudo_docstring_stuff(): 119 | # Create a pseudo function for docstring statements. 120 | %s 121 | """) 122 | if string is None: 123 | return [] 124 | 125 | for element in re.findall('((?:\w+\.)*\w+)\.', string): 126 | # Try to import module part in dotted name. 127 | # (e.g., 'threading' in 'threading.Thread'). 128 | string = 'import %s\n' % element + string 129 | 130 | # Take the default grammar here, if we load the Python 2.7 grammar here, it 131 | # will be impossible to use `...` (Ellipsis) as a token. Docstring types 132 | # don't need to conform with the current grammar. 
133 | p = Parser(load_grammar(), code % indent_block(string)) 134 | try: 135 | pseudo_cls = p.module.subscopes[0] 136 | # First pick suite, then simple_stmt (-2 for DEDENT) and then the node, 137 | # which is also not the last item, because there's a newline. 138 | stmt = pseudo_cls.children[-1].children[-2].children[-2] 139 | except (AttributeError, IndexError): 140 | return [] 141 | 142 | # Use the module of the param. 143 | # TODO this module is not the module of the param in case of a function 144 | # call. In that case it's the module of the function call. 145 | # stuffed with content from a function call. 146 | pseudo_cls.parent = module 147 | return list(_execute_types_in_stmt(evaluator, stmt)) 148 | 149 | 150 | def _execute_types_in_stmt(evaluator, stmt): 151 | """ 152 | Executing all types or general elements that we find in a statement. This 153 | doesn't include tuple, list and dict literals, because the stuff they 154 | contain is executed. (Used as type information). 155 | """ 156 | definitions = evaluator.eval_element(stmt) 157 | return chain.from_iterable(_execute_array_values(evaluator, d) for d in definitions) 158 | 159 | 160 | def _execute_array_values(evaluator, array): 161 | """ 162 | Tuples indicate that there's not just one return value, but the listed 163 | ones. `(str, int)` means that it returns a tuple with both types. 
164 | """ 165 | if isinstance(array, Array): 166 | values = [] 167 | for typ in array.values(): 168 | objects = _execute_array_values(evaluator, typ) 169 | values.append(AlreadyEvaluated(objects)) 170 | return [FakeSequence(evaluator, values, array.type)] 171 | else: 172 | return evaluator.execute(array) 173 | 174 | 175 | @memoize_default(None, evaluator_is_first_arg=True) 176 | def follow_param(evaluator, param): 177 | func = param.parent_function 178 | 179 | return [p 180 | for param_str in _search_param_in_docstr(func.raw_doc, 181 | str(param.name)) 182 | for p in _evaluate_for_statement_string(evaluator, param_str, 183 | param.get_parent_until())] 184 | 185 | 186 | @memoize_default(None, evaluator_is_first_arg=True) 187 | def find_return_types(evaluator, func): 188 | def search_return_in_docstr(code): 189 | for p in DOCSTRING_RETURN_PATTERNS: 190 | match = p.search(code) 191 | if match: 192 | return _strip_rst_role(match.group(1)) 193 | 194 | type_str = search_return_in_docstr(func.raw_doc) 195 | return _evaluate_for_statement_string(evaluator, type_str, func.get_parent_until()) 196 | -------------------------------------------------------------------------------- /lib/jedi/refactoring.py: -------------------------------------------------------------------------------- 1 | """ 2 | Introduce some basic refactoring functions to |jedi|. This module is still in a 3 | very early development stage and needs much testing and improvement. 4 | 5 | .. warning:: I won't do too much here, but if anyone wants to step in, please 6 | do. 
Refactoring is none of my priorities 7 | 8 | It uses the |jedi| `API `_ and supports currently the 9 | following functions (sometimes bug-prone): 10 | 11 | - rename 12 | - extract variable 13 | - inline variable 14 | """ 15 | import difflib 16 | 17 | from jedi import common 18 | from jedi.evaluate import helpers 19 | from jedi.parser import tree as pt 20 | 21 | 22 | class Refactoring(object): 23 | def __init__(self, change_dct): 24 | """ 25 | :param change_dct: dict(old_path=(new_path, old_lines, new_lines)) 26 | """ 27 | self.change_dct = change_dct 28 | 29 | def old_files(self): 30 | dct = {} 31 | for old_path, (new_path, old_l, new_l) in self.change_dct.items(): 32 | dct[new_path] = '\n'.join(new_l) 33 | return dct 34 | 35 | def new_files(self): 36 | dct = {} 37 | for old_path, (new_path, old_l, new_l) in self.change_dct.items(): 38 | dct[new_path] = '\n'.join(new_l) 39 | return dct 40 | 41 | def diff(self): 42 | texts = [] 43 | for old_path, (new_path, old_l, new_l) in self.change_dct.items(): 44 | if old_path: 45 | udiff = difflib.unified_diff(old_l, new_l) 46 | else: 47 | udiff = difflib.unified_diff(old_l, new_l, old_path, new_path) 48 | texts.append('\n'.join(udiff)) 49 | return '\n'.join(texts) 50 | 51 | 52 | def rename(script, new_name): 53 | """ The `args` / `kwargs` params are the same as in `api.Script`. 54 | :param operation: The refactoring operation to execute. 55 | :type operation: str 56 | :type source: str 57 | :return: list of changed lines/changed files 58 | """ 59 | return Refactoring(_rename(script.usages(), new_name)) 60 | 61 | 62 | def _rename(names, replace_str): 63 | """ For both rename and inline. 
""" 64 | order = sorted(names, key=lambda x: (x.module_path, x.line, x.column), 65 | reverse=True) 66 | 67 | def process(path, old_lines, new_lines): 68 | if new_lines is not None: # goto next file, save last 69 | dct[path] = path, old_lines, new_lines 70 | 71 | dct = {} 72 | current_path = object() 73 | new_lines = old_lines = None 74 | for name in order: 75 | if name.in_builtin_module(): 76 | continue 77 | if current_path != name.module_path: 78 | current_path = name.module_path 79 | 80 | process(current_path, old_lines, new_lines) 81 | if current_path is not None: 82 | # None means take the source that is a normal param. 83 | with open(current_path) as f: 84 | source = f.read() 85 | 86 | new_lines = common.splitlines(common.source_to_unicode(source)) 87 | old_lines = new_lines[:] 88 | 89 | nr, indent = name.line, name.column 90 | line = new_lines[nr - 1] 91 | new_lines[nr - 1] = line[:indent] + replace_str + \ 92 | line[indent + len(name.name):] 93 | process(current_path, old_lines, new_lines) 94 | return dct 95 | 96 | 97 | def extract(script, new_name): 98 | """ The `args` / `kwargs` params are the same as in `api.Script`. 99 | :param operation: The refactoring operation to execute. 
100 | :type operation: str 101 | :type source: str 102 | :return: list of changed lines/changed files 103 | """ 104 | new_lines = common.splitlines(common.source_to_unicode(script.source)) 105 | old_lines = new_lines[:] 106 | 107 | user_stmt = script._parser.user_stmt() 108 | 109 | # TODO care for multiline extracts 110 | dct = {} 111 | if user_stmt: 112 | pos = script._pos 113 | line_index = pos[0] - 1 114 | arr, index = helpers.array_for_pos(user_stmt, pos) 115 | if arr is not None: 116 | start_pos = arr[index].start_pos 117 | end_pos = arr[index].end_pos 118 | 119 | # take full line if the start line is different from end line 120 | e = end_pos[1] if end_pos[0] == start_pos[0] else None 121 | start_line = new_lines[start_pos[0] - 1] 122 | text = start_line[start_pos[1]:e] 123 | for l in range(start_pos[0], end_pos[0] - 1): 124 | text += '\n' + l 125 | if e is None: 126 | end_line = new_lines[end_pos[0] - 1] 127 | text += '\n' + end_line[:end_pos[1]] 128 | 129 | # remove code from new lines 130 | t = text.lstrip() 131 | del_start = start_pos[1] + len(text) - len(t) 132 | 133 | text = t.rstrip() 134 | del_end = len(t) - len(text) 135 | if e is None: 136 | new_lines[end_pos[0] - 1] = end_line[end_pos[1] - del_end:] 137 | e = len(start_line) 138 | else: 139 | e = e - del_end 140 | start_line = start_line[:del_start] + new_name + start_line[e:] 141 | new_lines[start_pos[0] - 1] = start_line 142 | new_lines[start_pos[0]:end_pos[0] - 1] = [] 143 | 144 | # add parentheses in multiline case 145 | open_brackets = ['(', '[', '{'] 146 | close_brackets = [')', ']', '}'] 147 | if '\n' in text and not (text[0] in open_brackets and text[-1] == 148 | close_brackets[open_brackets.index(text[0])]): 149 | text = '(%s)' % text 150 | 151 | # add new line before statement 152 | indent = user_stmt.start_pos[1] 153 | new = "%s%s = %s" % (' ' * indent, new_name, text) 154 | new_lines.insert(line_index, new) 155 | dct[script.path] = script.path, old_lines, new_lines 156 | return 
Refactoring(dct) 157 | 158 | 159 | def inline(script): 160 | """ 161 | :type script: api.Script 162 | """ 163 | new_lines = common.splitlines(common.source_to_unicode(script.source)) 164 | 165 | dct = {} 166 | 167 | definitions = script.goto_assignments() 168 | with common.ignored(AssertionError): 169 | assert len(definitions) == 1 170 | stmt = definitions[0]._definition 171 | usages = script.usages() 172 | inlines = [r for r in usages 173 | if not stmt.start_pos <= (r.line, r.column) <= stmt.end_pos] 174 | inlines = sorted(inlines, key=lambda x: (x.module_path, x.line, x.column), 175 | reverse=True) 176 | expression_list = stmt.expression_list() 177 | # don't allow multiline refactorings for now. 178 | assert stmt.start_pos[0] == stmt.end_pos[0] 179 | index = stmt.start_pos[0] - 1 180 | 181 | line = new_lines[index] 182 | replace_str = line[expression_list[0].start_pos[1]:stmt.end_pos[1] + 1] 183 | replace_str = replace_str.strip() 184 | # tuples need parentheses 185 | if expression_list and isinstance(expression_list[0], pr.Array): 186 | arr = expression_list[0] 187 | if replace_str[0] not in ['(', '[', '{'] and len(arr) > 1: 188 | replace_str = '(%s)' % replace_str 189 | 190 | # if it's the only assignment, remove the statement 191 | if len(stmt.get_defined_names()) == 1: 192 | line = line[:stmt.start_pos[1]] + line[stmt.end_pos[1]:] 193 | 194 | dct = _rename(inlines, replace_str) 195 | # remove the empty line 196 | new_lines = dct[script.path][2] 197 | if line.strip(): 198 | new_lines[index] = line 199 | else: 200 | new_lines.pop(index) 201 | 202 | return Refactoring(dct) 203 | -------------------------------------------------------------------------------- /lib/jedi/parser/pgen2/parse.py: -------------------------------------------------------------------------------- 1 | # Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. 2 | # Licensed to PSF under a Contributor Agreement. 3 | 4 | # Modifications: 5 | # Copyright 2014 David Halter. 
Integration into Jedi. 6 | # Modifications are dual-licensed: MIT and PSF. 7 | 8 | """ 9 | Parser engine for the grammar tables generated by pgen. 10 | 11 | The grammar table must be loaded first. 12 | 13 | See Parser/parser.c in the Python distribution for additional info on 14 | how this parsing engine works. 15 | """ 16 | 17 | # Local imports 18 | from jedi.parser import tokenize 19 | 20 | 21 | class ParseError(Exception): 22 | """Exception to signal the parser is stuck.""" 23 | 24 | def __init__(self, msg, type, value, start_pos): 25 | Exception.__init__(self, "%s: type=%r, value=%r, start_pos=%r" % 26 | (msg, tokenize.tok_name[type], value, start_pos)) 27 | self.msg = msg 28 | self.type = type 29 | self.value = value 30 | self.start_pos = start_pos 31 | 32 | 33 | class PgenParser(object): 34 | """Parser engine. 35 | 36 | The proper usage sequence is: 37 | 38 | p = Parser(grammar, [converter]) # create instance 39 | p.setup([start]) # prepare for parsing 40 | : 41 | if p.addtoken(...): # parse a token; may raise ParseError 42 | break 43 | root = p.rootnode # root of abstract syntax tree 44 | 45 | A Parser instance may be reused by calling setup() repeatedly. 46 | 47 | A Parser instance contains state pertaining to the current token 48 | sequence, and should not be used concurrently by different threads 49 | to parse separate token sequences. 50 | 51 | See driver.py for how to get input tokens by tokenizing a file or 52 | string. 53 | 54 | Parsing is complete when addtoken() returns True; the root of the 55 | abstract syntax tree can then be retrieved from the rootnode 56 | instance variable. When a syntax error occurs, addtoken() raises 57 | the ParseError exception. There is no error recovery; the parser 58 | cannot be used after a syntax error was reported (but it can be 59 | reinitialized by calling setup()). 60 | 61 | """ 62 | 63 | def __init__(self, grammar, convert_node, convert_leaf, error_recovery): 64 | """Constructor. 
65 | 66 | The grammar argument is a grammar.Grammar instance; see the 67 | grammar module for more information. 68 | 69 | The parser is not ready yet for parsing; you must call the 70 | setup() method to get it started. 71 | 72 | The optional convert argument is a function mapping concrete 73 | syntax tree nodes to abstract syntax tree nodes. If not 74 | given, no conversion is done and the syntax tree produced is 75 | the concrete syntax tree. If given, it must be a function of 76 | two arguments, the first being the grammar (a grammar.Grammar 77 | instance), and the second being the concrete syntax tree node 78 | to be converted. The syntax tree is converted from the bottom 79 | up. 80 | 81 | A concrete syntax tree node is a (type, nodes) tuple, where 82 | type is the node type (a token or symbol number) and nodes 83 | is a list of children for symbols, and None for tokens. 84 | 85 | An abstract syntax tree node may be anything; this is entirely 86 | up to the converter function. 87 | 88 | """ 89 | self.grammar = grammar 90 | self.convert_node = convert_node 91 | self.convert_leaf = convert_leaf 92 | 93 | # Prepare for parsing. 94 | start = self.grammar.start 95 | # Each stack entry is a tuple: (dfa, state, node). 96 | # A node is a tuple: (type, children), 97 | # where children is a list of nodes or None 98 | newnode = (start, []) 99 | stackentry = (self.grammar.dfas[start], 0, newnode) 100 | self.stack = [stackentry] 101 | self.rootnode = None 102 | self.error_recovery = error_recovery 103 | 104 | def parse(self, tokenizer): 105 | for type, value, prefix, start_pos in tokenizer: 106 | if self.addtoken(type, value, prefix, start_pos): 107 | break 108 | else: 109 | # We never broke out -- EOF is too soon -- Unfinished statement. 110 | self.error_recovery(self.grammar, self.stack, type, value, 111 | start_pos, prefix, self.addtoken) 112 | # Add the ENDMARKER again. 
113 | if not self.addtoken(type, value, prefix, start_pos): 114 | raise ParseError("incomplete input", type, value, start_pos) 115 | return self.rootnode 116 | 117 | def addtoken(self, type, value, prefix, start_pos): 118 | """Add a token; return True if this is the end of the program.""" 119 | # Map from token to label 120 | if type == tokenize.NAME: 121 | # Check for reserved words (keywords) 122 | try: 123 | ilabel = self.grammar.keywords[value] 124 | except KeyError: 125 | ilabel = self.grammar.tokens[type] 126 | else: 127 | ilabel = self.grammar.tokens[type] 128 | 129 | # Loop until the token is shifted; may raise exceptions 130 | while True: 131 | dfa, state, node = self.stack[-1] 132 | states, first = dfa 133 | arcs = states[state] 134 | # Look for a state with this label 135 | for i, newstate in arcs: 136 | t, v = self.grammar.labels[i] 137 | if ilabel == i: 138 | # Look it up in the list of labels 139 | assert t < 256 140 | # Shift a token; we're done with it 141 | self.shift(type, value, newstate, prefix, start_pos) 142 | # Pop while we are in an accept-only state 143 | state = newstate 144 | while states[state] == [(0, state)]: 145 | self.pop() 146 | if not self.stack: 147 | # Done parsing! 
148 | return True 149 | dfa, state, node = self.stack[-1] 150 | states, first = dfa 151 | # Done with this token 152 | return False 153 | elif t >= 256: 154 | # See if it's a symbol and if we're in its first set 155 | itsdfa = self.grammar.dfas[t] 156 | itsstates, itsfirst = itsdfa 157 | if ilabel in itsfirst: 158 | # Push a symbol 159 | self.push(t, itsdfa, newstate) 160 | break # To continue the outer while loop 161 | else: 162 | if (0, state) in arcs: 163 | # An accepting state, pop it and try something else 164 | self.pop() 165 | if not self.stack: 166 | # Done parsing, but another token is input 167 | raise ParseError("too much input", type, value, start_pos) 168 | else: 169 | self.error_recovery(self.grammar, self.stack, type, 170 | value, start_pos, prefix, self.addtoken) 171 | break 172 | 173 | def shift(self, type, value, newstate, prefix, start_pos): 174 | """Shift a token. (Internal)""" 175 | dfa, state, node = self.stack[-1] 176 | newnode = self.convert_leaf(self.grammar, type, value, prefix, start_pos) 177 | node[-1].append(newnode) 178 | self.stack[-1] = (dfa, newstate, node) 179 | 180 | def push(self, type, newdfa, newstate): 181 | """Push a nonterminal. (Internal)""" 182 | dfa, state, node = self.stack[-1] 183 | newnode = (type, []) 184 | self.stack[-1] = (dfa, newstate, node) 185 | self.stack.append((newdfa, 0, newnode)) 186 | 187 | def pop(self): 188 | """Pop a nonterminal. (Internal)""" 189 | popdfa, popstate, (type, children) = self.stack.pop() 190 | # If there's exactly one child, return that child instead of creating a 191 | # new node. We still create expr_stmt and file_input though, because a 192 | # lot of Jedi depends on its logic. 
193 | if len(children) == 1: 194 | newnode = children[0] 195 | else: 196 | newnode = self.convert_node(self.grammar, type, children) 197 | 198 | try: 199 | # Equal to: 200 | # dfa, state, node = self.stack[-1] 201 | # symbol, children = node 202 | self.stack[-1][2][1].append(newnode) 203 | except IndexError: 204 | # Stack is empty, set the rootnode. 205 | self.rootnode = newnode 206 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/stdlib.py: -------------------------------------------------------------------------------- 1 | """ 2 | Implementations of standard library functions, because it's not possible to 3 | understand them with Jedi. 4 | 5 | To add a new implementation, create a function and add it to the 6 | ``_implemented`` dict at the bottom of this module. 7 | 8 | """ 9 | import collections 10 | import re 11 | 12 | from jedi._compatibility import unicode 13 | from jedi.evaluate import compiled 14 | from jedi.evaluate import representation as er 15 | from jedi.evaluate import iterable 16 | from jedi.parser import Parser 17 | from jedi.parser import tree 18 | from jedi import debug 19 | from jedi.evaluate import precedence 20 | from jedi.evaluate import param 21 | 22 | 23 | class NotInStdLib(LookupError): 24 | pass 25 | 26 | 27 | def execute(evaluator, obj, params): 28 | try: 29 | obj_name = str(obj.name) 30 | except AttributeError: 31 | pass 32 | else: 33 | if obj.parent == compiled.builtin: 34 | module_name = 'builtins' 35 | elif isinstance(obj.parent, tree.Module): 36 | module_name = str(obj.parent.name) 37 | else: 38 | module_name = '' 39 | 40 | # for now we just support builtin functions. 
41 | try: 42 | return _implemented[module_name][obj_name](evaluator, obj, params) 43 | except KeyError: 44 | pass 45 | raise NotInStdLib() 46 | 47 | 48 | def _follow_param(evaluator, params, index): 49 | try: 50 | key, values = list(params.unpack())[index] 51 | except IndexError: 52 | return [] 53 | else: 54 | return iterable.unite(evaluator.eval_element(v) for v in values) 55 | 56 | 57 | def argument_clinic(string, want_obj=False, want_scope=False): 58 | """ 59 | Works like Argument Clinic (PEP 436), to validate function params. 60 | """ 61 | clinic_args = [] 62 | allow_kwargs = False 63 | optional = False 64 | while string: 65 | # Optional arguments have to begin with a bracket. And should always be 66 | # at the end of the arguments. This is therefore not a proper argument 67 | # clinic implementation. `range()` for exmple allows an optional start 68 | # value at the beginning. 69 | match = re.match('(?:(?:(\[),? ?|, ?|)(\w+)|, ?/)\]*', string) 70 | string = string[len(match.group(0)):] 71 | if not match.group(2): # A slash -> allow named arguments 72 | allow_kwargs = True 73 | continue 74 | optional = optional or bool(match.group(1)) 75 | word = match.group(2) 76 | clinic_args.append((word, optional, allow_kwargs)) 77 | 78 | def f(func): 79 | def wrapper(evaluator, obj, arguments): 80 | try: 81 | lst = list(arguments.eval_argument_clinic(clinic_args)) 82 | except ValueError: 83 | return [] 84 | else: 85 | kwargs = {} 86 | if want_scope: 87 | kwargs['scope'] = arguments.scope() 88 | if want_obj: 89 | kwargs['obj'] = obj 90 | return func(evaluator, *lst, **kwargs) 91 | 92 | return wrapper 93 | return f 94 | 95 | 96 | @argument_clinic('object, name[, default], /') 97 | def builtins_getattr(evaluator, objects, names, defaults=None): 98 | types = [] 99 | # follow the first param 100 | for obj in objects: 101 | if not isinstance(obj, (er.Instance, er.Class, tree.Module, compiled.CompiledObject)): 102 | debug.warning('getattr called without instance') 103 | continue 
104 | 105 | for name in names: 106 | if precedence.is_string(name): 107 | return evaluator.find_types(obj, name.obj) 108 | else: 109 | debug.warning('getattr called without str') 110 | continue 111 | return types 112 | 113 | 114 | @argument_clinic('object[, bases, dict], /') 115 | def builtins_type(evaluator, objects, bases, dicts): 116 | if bases or dicts: 117 | # metaclass... maybe someday... 118 | return [] 119 | else: 120 | return [o.base for o in objects if isinstance(o, er.Instance)] 121 | 122 | 123 | class SuperInstance(er.Instance): 124 | """To be used like the object ``super`` returns.""" 125 | def __init__(self, evaluator, cls): 126 | su = cls.py_mro()[1] 127 | super().__init__(evaluator, su and su[0] or self) 128 | 129 | 130 | @argument_clinic('[type[, obj]], /', want_scope=True) 131 | def builtins_super(evaluator, types, objects, scope): 132 | # TODO make this able to detect multiple inheritance super 133 | accept = (tree.Function, er.FunctionExecution) 134 | if scope.isinstance(*accept): 135 | wanted = (tree.Class, er.Instance) 136 | cls = scope.get_parent_until(accept + wanted, 137 | include_current=False) 138 | if isinstance(cls, wanted): 139 | if isinstance(cls, tree.Class): 140 | cls = er.Class(evaluator, cls) 141 | elif isinstance(cls, er.Instance): 142 | cls = cls.base 143 | su = cls.py__bases__(evaluator) 144 | if su: 145 | return evaluator.execute(su[0]) 146 | return [] 147 | 148 | 149 | @argument_clinic('sequence, /', want_obj=True) 150 | def builtins_reversed(evaluator, sequences, obj): 151 | # Unpack the iterator values 152 | objects = tuple(iterable.get_iterator_types(sequences)) 153 | rev = [iterable.AlreadyEvaluated([o]) for o in reversed(objects)] 154 | # Repack iterator values and then run it the normal way. This is 155 | # necessary, because `reversed` is a function and autocompletion 156 | # would fail in certain cases like `reversed(x).__iter__` if we 157 | # just returned the result directly. 
158 | rev = iterable.AlreadyEvaluated( 159 | [iterable.FakeSequence(evaluator, rev, 'list')] 160 | ) 161 | return [er.Instance(evaluator, obj, param.Arguments(evaluator, [rev]))] 162 | 163 | 164 | @argument_clinic('obj, type, /') 165 | def builtins_isinstance(evaluator, objects, types): 166 | bool_results = set([]) 167 | for o in objects: 168 | try: 169 | mro_func = o.py__class__(evaluator).py__mro__ 170 | except AttributeError: 171 | # This is temporary. Everything should have a class attribute in 172 | # Python?! Maybe we'll leave it here, because some numpy objects or 173 | # whatever might not. 174 | return [compiled.true_obj, compiled.false_obj] 175 | 176 | mro = mro_func(evaluator) 177 | 178 | for cls_or_tup in types: 179 | if cls_or_tup.is_class(): 180 | bool_results.add(cls_or_tup in mro) 181 | else: 182 | # Check for tuples. 183 | classes = iterable.get_iterator_types([cls_or_tup]) 184 | bool_results.add(any(cls in mro for cls in classes)) 185 | 186 | return [compiled.keyword_from_value(x) for x in bool_results] 187 | 188 | 189 | def collections_namedtuple(evaluator, obj, params): 190 | """ 191 | Implementation of the namedtuple function. 192 | 193 | This has to be done by processing the namedtuple class template and 194 | evaluating the result. 195 | 196 | .. note:: |jedi| only supports namedtuples on Python >2.6. 
197 | 198 | """ 199 | # Namedtuples are not supported on Python 2.6 200 | if not hasattr(collections, '_class_template'): 201 | return [] 202 | 203 | # Process arguments 204 | name = _follow_param(evaluator, params, 0)[0].obj 205 | _fields = _follow_param(evaluator, params, 1)[0] 206 | if isinstance(_fields, compiled.CompiledObject): 207 | fields = _fields.obj.replace(',', ' ').split() 208 | elif isinstance(_fields, iterable.Array): 209 | try: 210 | fields = [v.obj for v in _fields.values()] 211 | except AttributeError: 212 | return [] 213 | else: 214 | return [] 215 | 216 | # Build source 217 | source = collections._class_template.format( 218 | typename=name, 219 | field_names=fields, 220 | num_fields=len(fields), 221 | arg_list=', '.join(fields), 222 | repr_fmt=', '.join(collections._repr_template.format(name=name) for name in fields), 223 | field_defs='\n'.join(collections._field_template.format(index=index, name=name) 224 | for index, name in enumerate(fields)) 225 | ) 226 | 227 | # Parse source 228 | generated_class = Parser(evaluator.grammar, unicode(source)).module.subscopes[0] 229 | return [er.Class(evaluator, generated_class)] 230 | 231 | 232 | @argument_clinic('first, /') 233 | def _return_first_param(evaluator, firsts): 234 | return firsts 235 | 236 | 237 | _implemented = { 238 | 'builtins': { 239 | 'getattr': builtins_getattr, 240 | 'type': builtins_type, 241 | 'super': builtins_super, 242 | 'reversed': builtins_reversed, 243 | 'isinstance': builtins_isinstance, 244 | }, 245 | 'copy': { 246 | 'copy': _return_first_param, 247 | 'deepcopy': _return_first_param, 248 | }, 249 | 'json': { 250 | 'load': lambda *args: [], 251 | 'loads': lambda *args: [], 252 | }, 253 | 'collections': { 254 | 'namedtuple': collections_namedtuple, 255 | }, 256 | } 257 | -------------------------------------------------------------------------------- /spec/python-tools-spec.coffee: -------------------------------------------------------------------------------- 1 | PythonTools = 
require('../lib/python-tools'); 2 | {Point, Range} = require('atom'); 3 | 4 | describe "PythonTools", -> 5 | pythonTools = null 6 | beforeEach -> 7 | waitsForPromise -> 8 | atom.packages.activatePackage('python-tools') 9 | waitsForPromise -> 10 | atom.packages.activatePackage('language-python') 11 | runs -> 12 | pythonTools = atom.packages.getActivePackage('python-tools').mainModule 13 | 14 | describe "when running jedi commands", -> 15 | editor = null 16 | beforeEach -> 17 | waitsForPromise -> 18 | atom.workspace.open('test.py') 19 | 20 | runs -> 21 | editor = atom.workspace.getActiveTextEditor() 22 | editor.setText(""" 23 | import json 24 | """) 25 | 26 | it "does not send too many commands over time", -> 27 | editor.setCursorBufferPosition(new Point(0, 9)) 28 | spyOn(pythonTools, 'handleJediToolsResponse') 29 | waitsForPromise -> 30 | pythonTools.jediToolsRequest('gotoDef') 31 | waitsForPromise -> 32 | pythonTools.jediToolsRequest('gotoDef').then -> 33 | expect(pythonTools.handleJediToolsResponse.calls.length).toEqual(2) 34 | 35 | describe "when running the goto definitions command", -> 36 | editor = null 37 | beforeEach -> 38 | waitsForPromise -> 39 | atom.workspace.open('mike.py') 40 | 41 | runs -> 42 | editor = atom.workspace.getActiveTextEditor() 43 | editor.setText(""" 44 | import json 45 | 46 | class Snake(object): 47 | def slither(self, dict): 48 | return json.dumps(dict) 49 | 50 | snake = Snake() 51 | snake.slither({'x': 10, 'y': 20}) 52 | 53 | i_dont_exist() 54 | """) 55 | 56 | it "moves to the correct class location", -> 57 | editor.setCursorBufferPosition(new Point(6, 9)) 58 | waitsForPromise -> 59 | pythonTools.jediToolsRequest('gotoDef').then( () -> 60 | expect(editor.getCursorBufferPosition()).toEqual(new Point(3, 6)) 61 | ) 62 | 63 | it "moves to the correct method location", -> 64 | editor.setCursorBufferPosition(new Point(7, 7)) 65 | waitsForPromise -> 66 | pythonTools.jediToolsRequest('gotoDef').then( () -> 67 | 
expect(editor.getCursorBufferPosition()).toEqual(new Point(4, 8)) 68 | ) 69 | 70 | it "does nothing if symbol does not exist", -> 71 | editor.setCursorBufferPosition(new Point(9, 7)) 72 | waitsForPromise -> 73 | pythonTools.jediToolsRequest('gotoDef').then( () -> 74 | expect(editor.getCursorBufferPosition()).toEqual(new Point(9, 7)) 75 | ) 76 | 77 | it "opens appropriate file if required", -> 78 | editor.setCursorBufferPosition(new Point(0, 9)) 79 | spyOn(atom.workspace, 'open').andCallThrough() 80 | waitsForPromise -> 81 | pythonTools.jediToolsRequest('gotoDef').then( () -> 82 | path = atom.workspace.open.mostRecentCall.args[0] 83 | if /^win/.test process.platform 84 | expect(path).toMatch(/.*\\json\\__init__.py/) 85 | else 86 | expect(path).toMatch(/.*\/json\/__init__.py/) 87 | ) 88 | 89 | describe "when tools.py gets an invalid request", -> 90 | editor = null 91 | beforeEach -> 92 | waitsForPromise -> 93 | atom.workspace.open('error.py') 94 | 95 | runs -> 96 | editor = atom.workspace.getActiveTextEditor() 97 | 98 | describe "when running the show usages command", -> 99 | editor = null 100 | beforeEach -> 101 | waitsForPromise -> 102 | atom.workspace.open('foo.py') 103 | 104 | runs -> 105 | editor = atom.workspace.getActiveTextEditor() 106 | editor.setText(""" 107 | def my_function(a, b): 108 | return a + b 109 | 110 | print my_function(10, 20) 111 | """) 112 | 113 | xit "selects the correct symbols", -> 114 | editor.setCursorBufferPosition(new Point(3, 8)) 115 | waitsForPromise -> 116 | pythonTools.jediToolsRequest('usages').then( ()-> 117 | expect(editor.getSelectedBufferRanges()).toEqual([ 118 | new Range(new Point(0, 4), new Point(0, 15)), 119 | new Range(new Point(3, 6), new Point(3, 17)), 120 | ]) 121 | ) 122 | 123 | xit "doesn't alter current selection on no results", -> 124 | editor.setCursorBufferPosition(new Point(3, 2)) 125 | waitsForPromise -> 126 | pythonTools.jediToolsRequest('usages').then( () -> 127 | 
expect(editor.getSelectedBufferRanges()).toEqual([ 128 | new Range(new Point(3, 2), new Point(3, 2)) 129 | ]) 130 | ) 131 | 132 | describe "when running the select string command", -> 133 | editor = null 134 | beforeEach -> 135 | waitsForPromise -> 136 | atom.workspace.open('lolcat.py') 137 | 138 | runs -> 139 | editor = atom.workspace.getActiveTextEditor() 140 | editor.setText(""" 141 | class Lolcat(object): 142 | mystring = 'hello world' 143 | anotherstring = "this is some text" 144 | block_text = \"\"\" 145 | This was a triumph! 146 | I'm making a note here: 147 | Huge success! 148 | \"\"\" 149 | more_blocks = ''' 150 | This is some text 151 | ''' 152 | sql_text = \"\"\"SELECT * 153 | FROM foo 154 | \"\"\" 155 | sql_text2 = '''SELECT * 156 | FROM bar 157 | ''' 158 | """) 159 | 160 | it "selects single-line single qoutes correctly", -> 161 | editor.setCursorBufferPosition(new Point(1, 17)) 162 | pythonTools.selectAllString() 163 | expect(editor.getSelectedBufferRange()).toEqual(new Range( 164 | new Point(1, 14), 165 | new Point(1, 25), 166 | ) 167 | ) 168 | 169 | it "selects single-line double qoutes correctly", -> 170 | editor.setCursorBufferPosition(new Point(2, 25)) 171 | pythonTools.selectAllString() 172 | expect(editor.getSelectedBufferRange()).toEqual(new Range( 173 | new Point(2, 19), 174 | new Point(2, 36), 175 | ) 176 | ) 177 | 178 | it "selects block string double qoutes correctly", -> 179 | atom.config.set('python-tools.smartBlockSelection', false) 180 | editor.setCursorBufferPosition(new Point(4, 15)) 181 | pythonTools.selectAllString() 182 | expect(editor.getSelectedBufferRange()).toEqual(new Range( 183 | new Point(3, 18), 184 | new Point(7, 2), 185 | ) 186 | ) 187 | 188 | it "smart selects double qoutes correctly", -> 189 | editor.setCursorBufferPosition(new Point(4, 15)) 190 | pythonTools.selectAllString() 191 | expect(editor.getSelectedBufferRanges()).toEqual([ 192 | new Range(new Point(4, 2), new Point(4, 21)), 193 | new Range(new Point(5, 2), 
new Point(5, 25)), 194 | new Range(new Point(6, 2), new Point(6, 15)), 195 | ]) 196 | 197 | it "selects block string single qoutes correctly", -> 198 | atom.config.set('python-tools.smartBlockSelection', false) 199 | editor.setCursorBufferPosition(new Point(9, 15)) 200 | pythonTools.selectAllString() 201 | expect(editor.getSelectedBufferRange()).toEqual(new Range( 202 | new Point(8, 19), 203 | new Point(10, 2), 204 | ) 205 | ) 206 | 207 | it "smart selects single qoutes correctly", -> 208 | editor.setCursorBufferPosition(new Point(9, 15)) 209 | pythonTools.selectAllString() 210 | expect(editor.getSelectedBufferRanges()).toEqual([ 211 | new Range(new Point(9, 2), new Point(9, 19)), 212 | ]) 213 | 214 | it "it selects block SQL double qoutes correctly", -> 215 | atom.config.set('python-tools.smartBlockSelection', false) 216 | editor.setCursorBufferPosition(new Point(12, 20)) 217 | pythonTools.selectAllString() 218 | expect(editor.getSelectedBufferRange()).toEqual(new Range( 219 | new Point(11, 16), 220 | new Point(13, 2), 221 | ) 222 | ) 223 | 224 | it "it selects block SQL single qoutes correctly", -> 225 | atom.config.set('python-tools.smartBlockSelection', false) 226 | editor.setCursorBufferPosition(new Point(14, 20)) 227 | pythonTools.selectAllString() 228 | expect(editor.getSelectedBufferRange()).toEqual(new Range( 229 | new Point(14, 17), 230 | new Point(16, 2), 231 | ) 232 | ) 233 | 234 | describe "when a response is returned from tools.py", -> 235 | 236 | it "informs the user with an info notification when no items were found", -> 237 | pythonTools.handleJediToolsResponse( 238 | type: "usages" 239 | definitions: [] 240 | ) 241 | [notification] = atom.notifications.getNotifications() 242 | expect(notification.type).toBe('info') 243 | 244 | it "informs the user with an error notification on error", -> 245 | pythonTools.handleJediToolsResponse( 246 | "error": "this is a test error" 247 | ) 248 | [notification] = atom.notifications.getNotifications() 249 | 
expect(notification.type).toBe('error') 250 | 251 | it "informs the user with an error notification on invalid type", -> 252 | pythonTools.handleJediToolsResponse( 253 | type: "monkeys" 254 | definitions: [{ 255 | line: 0 256 | column: 0 257 | } ] 258 | ) 259 | [notification] = atom.notifications.getNotifications() 260 | expect(notification.type).toBe('error') 261 | -------------------------------------------------------------------------------- /lib/jedi/evaluate/sys_path.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import os 3 | import sys 4 | 5 | from jedi._compatibility import exec_function, unicode 6 | from jedi.parser import tree 7 | from jedi.parser import Parser 8 | from jedi.evaluate.cache import memoize_default 9 | from jedi import debug 10 | from jedi import common 11 | from jedi import cache 12 | 13 | 14 | def get_sys_path(): 15 | def check_virtual_env(sys_path): 16 | """ Add virtualenv's site-packages to the `sys.path`.""" 17 | venv = os.getenv('VIRTUAL_ENV') 18 | if not venv: 19 | return 20 | venv = os.path.abspath(venv) 21 | p = _get_venv_sitepackages(venv) 22 | if p not in sys_path: 23 | sys_path.insert(0, p) 24 | 25 | # Add all egg-links from the virtualenv. 
26 | for egg_link in glob.glob(os.path.join(p, '*.egg-link')): 27 | with open(egg_link) as fd: 28 | sys_path.insert(0, fd.readline().rstrip()) 29 | 30 | check_virtual_env(sys.path) 31 | return [p for p in sys.path if p != ""] 32 | 33 | 34 | def _get_venv_sitepackages(venv): 35 | if os.name == 'nt': 36 | p = os.path.join(venv, 'lib', 'site-packages') 37 | else: 38 | p = os.path.join(venv, 'lib', 'python%d.%d' % sys.version_info[:2], 39 | 'site-packages') 40 | return p 41 | 42 | 43 | def _execute_code(module_path, code): 44 | c = "import os; from os.path import *; result=%s" 45 | variables = {'__file__': module_path} 46 | try: 47 | exec_function(c % code, variables) 48 | except Exception: 49 | debug.warning('sys.path manipulation detected, but failed to evaluate.') 50 | else: 51 | try: 52 | res = variables['result'] 53 | if isinstance(res, str): 54 | return [os.path.abspath(res)] 55 | except KeyError: 56 | pass 57 | return [] 58 | 59 | 60 | def _paths_from_assignment(evaluator, expr_stmt): 61 | """ 62 | Extracts the assigned strings from an assignment that looks as follows:: 63 | 64 | >>> sys.path[0:0] = ['module/path', 'another/module/path'] 65 | 66 | This function is in general pretty tolerant (and therefore 'buggy'). 67 | However, it's not a big issue usually to add more paths to Jedi's sys_path, 68 | because it will only affect Jedi in very random situations and by adding 69 | more paths than necessary, it usually benefits the general user. 70 | """ 71 | for assignee, operator in zip(expr_stmt.children[::2], expr_stmt.children[1::2]): 72 | try: 73 | assert operator in ['=', '+='] 74 | assert tree.is_node(assignee, 'power') and len(assignee.children) > 1 75 | c = assignee.children 76 | assert c[0].type == 'name' and c[0].value == 'sys' 77 | trailer = c[1] 78 | assert trailer.children[0] == '.' and trailer.children[1].value == 'path' 79 | # TODO Essentially we're not checking details on sys.path 80 | # manipulation. 
Both assigment of the sys.path and changing/adding 81 | # parts of the sys.path are the same: They get added to the current 82 | # sys.path. 83 | """ 84 | execution = c[2] 85 | assert execution.children[0] == '[' 86 | subscript = execution.children[1] 87 | assert subscript.type == 'subscript' 88 | assert ':' in subscript.children 89 | """ 90 | except AssertionError: 91 | continue 92 | 93 | from jedi.evaluate.iterable import get_iterator_types 94 | from jedi.evaluate.precedence import is_string 95 | for val in get_iterator_types(evaluator.eval_statement(expr_stmt)): 96 | if is_string(val): 97 | yield val.obj 98 | 99 | 100 | def _paths_from_list_modifications(module_path, trailer1, trailer2): 101 | """ extract the path from either "sys.path.append" or "sys.path.insert" """ 102 | # Guarantee that both are trailers, the first one a name and the second one 103 | # a function execution with at least one param. 104 | if not (tree.is_node(trailer1, 'trailer') and trailer1.children[0] == '.' 105 | and tree.is_node(trailer2, 'trailer') and trailer2.children[0] == '(' 106 | and len(trailer2.children) == 3): 107 | return [] 108 | 109 | name = trailer1.children[1].value 110 | if name not in ['insert', 'append']: 111 | return [] 112 | 113 | arg = trailer2.children[1] 114 | if name == 'insert' and len(arg.children) in (3, 4): # Possible trailing comma. 
115 | arg = arg.children[2] 116 | return _execute_code(module_path, arg.get_code()) 117 | 118 | 119 | def _check_module(evaluator, module): 120 | def get_sys_path_powers(names): 121 | for name in names: 122 | power = name.parent.parent 123 | if tree.is_node(power, 'power'): 124 | c = power.children 125 | if isinstance(c[0], tree.Name) and c[0].value == 'sys' \ 126 | and tree.is_node(c[1], 'trailer'): 127 | n = c[1].children[1] 128 | if isinstance(n, tree.Name) and n.value == 'path': 129 | yield name, power 130 | 131 | sys_path = list(get_sys_path()) # copy 132 | try: 133 | possible_names = module.used_names['path'] 134 | except KeyError: 135 | pass 136 | else: 137 | for name, power in get_sys_path_powers(possible_names): 138 | stmt = name.get_definition() 139 | if len(power.children) >= 4: 140 | sys_path.extend(_paths_from_list_modifications(module.path, *power.children[2:4])) 141 | elif name.get_definition().type == 'expr_stmt': 142 | sys_path.extend(_paths_from_assignment(evaluator, stmt)) 143 | return sys_path 144 | 145 | 146 | @memoize_default(evaluator_is_first_arg=True, default=[]) 147 | def sys_path_with_modifications(evaluator, module): 148 | if module.path is None: 149 | # Support for modules without a path is bad, therefore return the 150 | # normal path. 
151 | return list(get_sys_path()) 152 | 153 | curdir = os.path.abspath(os.curdir) 154 | with common.ignored(OSError): 155 | os.chdir(os.path.dirname(module.path)) 156 | 157 | buildout_script_paths = set() 158 | 159 | result = _check_module(evaluator, module) 160 | result += _detect_django_path(module.path) 161 | for buildout_script in _get_buildout_scripts(module.path): 162 | for path in _get_paths_from_buildout_script(evaluator, buildout_script): 163 | buildout_script_paths.add(path) 164 | # cleanup, back to old directory 165 | os.chdir(curdir) 166 | return list(result) + list(buildout_script_paths) 167 | 168 | 169 | def _get_paths_from_buildout_script(evaluator, buildout_script): 170 | def load(buildout_script): 171 | try: 172 | with open(buildout_script, 'rb') as f: 173 | source = common.source_to_unicode(f.read()) 174 | except IOError: 175 | debug.dbg('Error trying to read buildout_script: %s', buildout_script) 176 | return 177 | 178 | p = Parser(evaluator.grammar, source, buildout_script) 179 | cache.save_parser(buildout_script, p) 180 | return p.module 181 | 182 | cached = cache.load_parser(buildout_script) 183 | module = cached and cached.module or load(buildout_script) 184 | if not module: 185 | return 186 | 187 | for path in _check_module(evaluator, module): 188 | yield path 189 | 190 | 191 | def traverse_parents(path): 192 | while True: 193 | new = os.path.dirname(path) 194 | if new == path: 195 | return 196 | path = new 197 | yield path 198 | 199 | 200 | def _get_parent_dir_with_file(path, filename): 201 | for parent in traverse_parents(path): 202 | if os.path.isfile(os.path.join(parent, filename)): 203 | return parent 204 | return None 205 | 206 | 207 | def _detect_django_path(module_path): 208 | """ Detects the path of the very well known Django library (if used) """ 209 | result = [] 210 | 211 | for parent in traverse_parents(module_path): 212 | with common.ignored(IOError): 213 | with open(parent + os.path.sep + 'manage.py'): 214 | debug.dbg('Found 
django path: %s', module_path) 215 | result.append(parent) 216 | return result 217 | 218 | 219 | def _get_buildout_scripts(module_path): 220 | """ 221 | if there is a 'buildout.cfg' file in one of the parent directories of the 222 | given module it will return a list of all files in the buildout bin 223 | directory that look like python files. 224 | 225 | :param module_path: absolute path to the module. 226 | :type module_path: str 227 | """ 228 | project_root = _get_parent_dir_with_file(module_path, 'buildout.cfg') 229 | if not project_root: 230 | return [] 231 | bin_path = os.path.join(project_root, 'bin') 232 | if not os.path.exists(bin_path): 233 | return [] 234 | extra_module_paths = [] 235 | for filename in os.listdir(bin_path): 236 | try: 237 | filepath = os.path.join(bin_path, filename) 238 | with open(filepath, 'r') as f: 239 | firstline = f.readline() 240 | if firstline.startswith('#!') and 'python' in firstline: 241 | extra_module_paths.append(filepath) 242 | except IOError as e: 243 | # either permission error or race cond. because file got deleted 244 | # ignore 245 | debug.warning(unicode(e)) 246 | continue 247 | return extra_module_paths 248 | -------------------------------------------------------------------------------- /lib/python-tools.coffee: -------------------------------------------------------------------------------- 1 | {Range, Point, CompositeDisposable} = require('atom'); 2 | path = require('path'); 3 | 4 | 5 | regexPatternIn = (pattern, list) -> 6 | for item in list 7 | if pattern.test(item) 8 | return true 9 | return false 10 | 11 | 12 | PythonTools = { 13 | config: { 14 | smartBlockSelection: { 15 | type: 'boolean', 16 | description: 'Do not select whitespace outside logical string blocks', 17 | default: true 18 | }, 19 | pythonPath: { 20 | type: 'string', 21 | default: '', 22 | title: 'Path to python directory', 23 | description: ''', 24 | Optional. 
Set it if default values are not working for you or you want to use specific 25 | python version. For example: `/usr/local/Cellar/python/2.7.3/bin` or `E:\\Python2.7` 26 | ''' 27 | } 28 | } 29 | 30 | subscriptions: null 31 | 32 | _issueReportLink: "https://github.com/michaelaquilina/python-tools/issues/new" 33 | 34 | activate: (state) -> 35 | # Events subscribed to in atom's system can be easily cleaned up with a CompositeDisposable 36 | this.subscriptions = new CompositeDisposable 37 | this.subscriptions.add( 38 | atom.commands.add( 39 | 'atom-text-editor[data-grammar="source python"]', 40 | {'python-tools:show-usages': () => this.jediToolsRequest('usages')} 41 | ) 42 | ) 43 | this.subscriptions.add( 44 | atom.commands.add( 45 | 'atom-text-editor[data-grammar="source python"]', 46 | {'python-tools:goto-definition': () => this.jediToolsRequest('gotoDef')} 47 | ) 48 | ) 49 | this.subscriptions.add( 50 | atom.commands.add( 51 | 'atom-text-editor[data-grammar="source python"]', 52 | {'python-tools:select-all-string': () => this.selectAllString()} 53 | ) 54 | ) 55 | 56 | env = process.env 57 | pythonPath = atom.config.get('python-tools.pythonPath') 58 | path_env = null 59 | 60 | if /^win/.test(process.platform) 61 | paths = [ 62 | 'C:\\Python2.7', 63 | 'C:\\Python3.4', 64 | 'C:\\Python34', 65 | 'C:\\Python3.5', 66 | 'C:\\Python35', 67 | 'C:\\Program Files (x86)\\Python 2.7', 68 | 'C:\\Program Files (x86)\\Python 3.4', 69 | 'C:\\Program Files (x86)\\Python 3.5', 70 | 'C:\\Program Files (x64)\\Python 2.7', 71 | 'C:\\Program Files (x64)\\Python 3.4', 72 | 'C:\\Program Files (x64)\\Python 3.5', 73 | 'C:\\Program Files\\Python 2.7', 74 | 'C:\\Program Files\\Python 3.4', 75 | 'C:\\Program Files\\Python 3.5' 76 | ] 77 | path_env = (env.Path or '') 78 | else 79 | paths = ['/usr/local/bin', '/usr/bin', '/bin', '/usr/sbin', '/sbin'] 80 | path_env = (env.PATH or '') 81 | 82 | path_env = path_env.split(path.delimiter) 83 | path_env.unshift(pythonPath if pythonPath and pythonPath 
not in path_env) 84 | for p in paths 85 | if p not in path_env 86 | path_env.push(p) 87 | env.PATH = path_env.join(path.delimiter) 88 | 89 | this.provider = require('child_process').spawn( 90 | 'python', [__dirname + '/tools.py'], env: env 91 | ) 92 | 93 | this.readline = require('readline').createInterface({ 94 | input: this.provider.stdout, 95 | output: this.provider.stdin 96 | }) 97 | 98 | this.provider.on('error', (err) => 99 | if err.code == 'ENOENT' 100 | atom.notifications.addWarning(""" 101 | python-tools was unable to find your machine's python executable. 102 | 103 | Please try set the path in package settings and then restart atom. 104 | 105 | If the issue persists please post an issue on 106 | #{this._issueReportLink} 107 | """, { 108 | detail: err, 109 | dismissable: true 110 | } 111 | ) 112 | else 113 | atom.notifications.addError(""" 114 | python-tools unexpected error. 115 | 116 | Please consider posting an issue on 117 | #{this._issueReportLink} 118 | """, { 119 | detail: err, 120 | dismissable: true 121 | } 122 | ) 123 | ) 124 | this.provider.on('exit', (code, signal) => 125 | if signal != 'SIGTERM' 126 | atom.notifications.addError( 127 | """ 128 | python-tools experienced an unexpected exit. 

          Please consider posting an issue on
          #{this._issueReportLink}
          """, {
            detail: "exit with code #{code}, signal #{signal}",
            dismissable: true
          }
        )
    )

  # Dispose of the command subscriptions and tear down the helper process.
  deactivate: () ->
    this.subscriptions.dispose()
    this.provider.kill()
    this.readline.close()

  # Select the contents of the string literal under the cursor. Handles both
  # single-line ('...' / "...") and block (''' / """) strings, using the
  # grammar scopes at the cursor position to decide which kind it is.
  selectAllString: () ->
    editor = atom.workspace.getActiveTextEditor()
    bufferPosition = editor.getCursorBufferPosition()
    line = editor.lineTextForBufferRow(bufferPosition.row)

    scopeDescriptor = editor.scopeDescriptorForBufferPosition(bufferPosition)
    scopes = scopeDescriptor.getScopesArray()

    block = false
    if regexPatternIn(/string.quoted.single.single-line.*/, scopes)
      delimiter = '\''
    else if regexPatternIn(/string.quoted.double.single-line.*/, scopes)
      delimiter = '"'
    else if regexPatternIn(/string.quoted.double.block.*/, scopes)
      delimiter = '"""'
      block = true
    else if regexPatternIn(/string.quoted.single.block.*/, scopes)
      delimiter = '\'\'\''
      block = true
    else
      # Cursor is not inside a recognised python string scope.
      return

    if not block
      # Single-line string: scan left and right on the current row for the
      # delimiter and select everything strictly between the two quotes.
      start = end = bufferPosition.column

      while line[start] != delimiter
        start = start - 1
        if start < 0
          return

      while line[end] != delimiter
        end = end + 1
        if end == line.length
          return

      editor.setSelectedBufferRange(new Range(
        new Point(bufferPosition.row, start + 1),
        new Point(bufferPosition.row, end),
      ))
    else
      start = end = bufferPosition.row
      start_index = end_index = -1

      # Detect if we are at the boundaries of the block string
      delim_index = line.indexOf(delimiter)

      if delim_index != -1
        scopes = editor.scopeDescriptorForBufferPosition(new Point(start, delim_index))
        scopes = scopes.getScopesArray()

        # We are at the beginning of the block
        if regexPatternIn(/punctuation.definition.string.begin.*/, scopes)
          start_index = line.indexOf(delimiter)
          while end_index == -1
            end = end + 1
            line = editor.lineTextForBufferRow(end)
            end_index = line.indexOf(delimiter)

        # We are the end of the block
        else if regexPatternIn(/punctuation.definition.string.end.*/, scopes)
          end_index = line.indexOf(delimiter)
          while start_index == -1
            start = start - 1
            line = editor.lineTextForBufferRow(start)
            start_index = line.indexOf(delimiter)

        # NOTE(review): if the delimiter appears on this row but the scope is
        # neither string-begin nor string-end, start_index/end_index remain -1
        # below — looks unhandled; confirm against upstream.

      else
        # We are neither at the beginning or the end of the block
        while end_index == -1
          end = end + 1
          line = editor.lineTextForBufferRow(end)
          end_index = line.indexOf(delimiter)
        while start_index == -1
          start = start - 1
          line = editor.lineTextForBufferRow(start)
          start_index = line.indexOf(delimiter)

      if atom.config.get('python-tools.smartBlockSelection')
        # Smart block selections: one selection per row, left-trimmed so the
        # indentation whitespace outside the logical string is excluded.
        selections = [new Range(
          new Point(start, start_index + delimiter.length),
          new Point(start, editor.lineTextForBufferRow(start).length),
        )]

        for i in [start + 1 ... end] by 1
          line = editor.lineTextForBufferRow(i)
          trimmed = line.replace(/^\s+/, "") # left trim
          selections.push(new Range(
            new Point(i, line.length - trimmed.length),
            new Point(i, line.length),
          ))

        line = editor.lineTextForBufferRow(end)
        trimmed = line.replace(/^\s+/, "") # left trim

        selections.push(new Range(
          new Point(end, line.length - trimmed.length),
          new Point(end, end_index),
        ))

        editor.setSelectedBufferRanges(selections.filter (range) -> not range.isEmpty())
      else
        # Plain selection from just after the opening delimiter to just
        # before the closing one, across rows.
        editor.setSelectedBufferRange(new Range(
          new Point(start, start_index + delimiter.length),
          new Point(end, end_index),
        ))

  # Dispatch a JSON response from tools.py: highlight usages, jump to a
  # definition, or surface an error notification.
  handleJediToolsResponse: (response) ->
    if 'error' of response
      console.error(response['error'])
      atom.notifications.addError(response['error'])
      return

    if response['definitions'].length > 0
      editor = atom.workspace.getActiveTextEditor()

      if response['type'] == 'usages'
        # Multi-select every usage that lives in the current file.
        path = editor.getPath()
        selections = []
        for item in response['definitions']
          if item['path'] == path
            selections.push(new Range(
              new Point(item['line'] - 1, item['col']),
              new Point(item['line'] - 1, item['col'] + item['name'].length), # Use string length
            ))

        editor.setSelectedBufferRanges(selections)

      else if response['type'] == 'gotoDef'
        first_def = response['definitions'][0]

        line = first_def['line']
        column = first_def['col']

        if line != null and column != null
          options = {
            initialLine: line,
            initialColumn: column,
            searchAllPanes: true
          }

          atom.workspace.open(first_def['path'], options).then((editor) ->
            editor.scrollToCursorPosition()
          )
        else
          atom.notifications.addError(
            "python-tools error. 
#{this._issueReportLink}", { 291 | detail: JSON.stringify(response), 292 | dismissable: true 293 | } 294 | ) 295 | else 296 | atom.notifications.addInfo("python-tools could not find any results!") 297 | 298 | jediToolsRequest: (type) -> 299 | editor = atom.workspace.getActiveTextEditor() 300 | grammar = editor.getGrammar() 301 | 302 | bufferPosition = editor.getCursorBufferPosition() 303 | 304 | payload = { 305 | type: type, 306 | path: editor.getPath(), 307 | source: editor.getText(), 308 | line: bufferPosition.row, 309 | col: bufferPosition.column, 310 | project_paths: atom.project.getPaths() 311 | } 312 | 313 | # This is needed for the promise to work correctly 314 | handleJediToolsResponse = this.handleJediToolsResponse 315 | readline = this.readline 316 | 317 | return new Promise((resolve, reject) -> 318 | response = readline.question("#{JSON.stringify(payload)}\n", (response) -> 319 | handleJediToolsResponse(JSON.parse(response)) 320 | resolve() 321 | ) 322 | ) 323 | } 324 | 325 | module.exports = PythonTools 326 | -------------------------------------------------------------------------------- /lib/jedi/cache.py: -------------------------------------------------------------------------------- 1 | """ 2 | This caching is very important for speed and memory optimizations. There's 3 | nothing really spectacular, just some decorators. The following cache types are 4 | available: 5 | 6 | - module caching (`load_parser` and `save_parser`), which uses pickle and is 7 | really important to assure low load times of modules like ``numpy``. 8 | - ``time_cache`` can be used to cache something for just a limited time span, 9 | which can be useful if there's user interaction and the user cannot react 10 | faster than a certain time. 11 | 12 | This module is one of the reasons why |jedi| is not thread-safe. As you can see 13 | there are global variables, which are holding the cache information. Some of 14 | these variables are being cleaned after every API usage. 
"""
import time
import os
import sys
import json
import hashlib
import gc
import inspect
import shutil
import re
try:
    import cPickle as pickle
except ImportError:
    import pickle

from jedi import settings
from jedi import common
from jedi import debug

# Maps a settings attribute name -> {key: (expiry_timestamp, value)}.
# Populated by the time_cache() decorator below.
_time_caches = {}

# for fast_parser, should not be deleted
parser_cache = {}


class ParserCacheItem(object):
    """Pairs a parser with the timestamp it was cached at."""
    def __init__(self, parser, change_time=None):
        self.parser = parser
        if change_time is None:
            change_time = time.time()
        self.change_time = change_time


def clear_time_caches(delete_all=False):
    """ Jedi caches many things, that should be completed after each completion
    finishes.

    :param delete_all: Deletes also the cache that is normally not deleted,
        like parser cache, which is important for faster parsing.
    """
    global _time_caches

    if delete_all:
        for cache in _time_caches.values():
            cache.clear()
        parser_cache.clear()
    else:
        # normally just kill the expired entries, not all
        for tc in _time_caches.values():
            # check time_cache for expired entries
            for key, (t, value) in list(tc.items()):
                if t < time.time():
                    # delete expired entries
                    del tc[key]


def time_cache(time_add_setting):
    """
    This decorator works as follows: Call it with a setting and after that
    use the function with a callable that returns the key.
    But: This function is only called if the key is not available. After a
    certain amount of time (`time_add_setting`) the cache is invalid.
    """
    def _temp(key_func):
        dct = {}
        _time_caches[time_add_setting] = dct

        def wrapper(*args, **kwargs):
            # key_func is a generator: its first yield is the cache key, its
            # second yield is the (lazily computed) value.
            generator = key_func(*args, **kwargs)
            key = next(generator)
            try:
                expiry, value = dct[key]
                if expiry > time.time():
                    return value
            except KeyError:
                pass

            value = next(generator)
            time_add = getattr(settings, time_add_setting)
            if key is not None:
                # key None means "never cache this result".
                dct[key] = time.time() + time_add, value
            return value
        return wrapper
    return _temp


@time_cache("call_signatures_validity")
def cache_call_signatures(evaluator, call, source, user_pos):
    """This function calculates the cache key."""
    index = user_pos[0] - 1
    lines = common.splitlines(source)

    # Everything from the start of the call up to the cursor is part of the
    # key, so edits before the cursor invalidate the cache.
    before_cursor = lines[index][:user_pos[1]]
    other_lines = lines[call.start_pos[0]:index]
    whole = '\n'.join(other_lines + [before_cursor])
    before_bracket = re.match(r'.*\(', whole, re.DOTALL)

    module_path = call.get_parent_until().path
    yield None if module_path is None else (module_path, before_bracket, call.start_pos)
    yield evaluator.eval_element(call)


def underscore_memoization(func):
    """
    Decorator for methods::

        class A(object):
            def x(self):
                if self._x:
                    self._x = 10
                return self._x

    Becomes::

        class A(object):
            @underscore_memoization
            def x(self):
                return 10

    A now has an attribute ``_x`` written by this decorator.
    """
    name = '_' + func.__name__

    def wrapper(self):
        try:
            # Fast path: the memoized attribute already exists.
            return getattr(self, name)
        except AttributeError:
            result = func(self)
            if inspect.isgenerator(result):
                # Generators cannot be re-iterated; materialize before caching.
                result = list(result)
            setattr(self, name, result)
            return result

    return wrapper


def memoize_method(method):
    """A normal memoize function."""
    def wrapper(self, *args, **kwargs):
        # Per-instance cache stored on the instance dict, keyed by arguments.
        dct = self.__dict__.setdefault('_memoize_method_dct', {})
        key = (args, frozenset(kwargs.items()))
        try:
            return dct[key]
        except KeyError:
            result = method(self, *args, **kwargs)
            dct[key] = result
            return result
    return wrapper


def cache_star_import(func):
    # Time-limited cache for star-import resolution, keyed by `self.base`.
    @time_cache("star_import_cache_validity")
    def wrapper(self):
        yield self.base  # The cache key
        yield func(self)
    return wrapper


def _invalidate_star_import_cache_module(module, only_main=False):
    """ Important if some new modules are being reparsed """
    # NOTE(review): `only_main` and the unpacked `t, modules` are unused in
    # this version — confirm against upstream before relying on them.
    try:
        t, modules = _time_caches['star_import_cache_validity'][module]
    except KeyError:
        pass
    else:
        del _time_caches['star_import_cache_validity'][module]


def invalidate_star_import_cache(path):
    """On success returns True."""
    try:
        parser_cache_item = parser_cache[path]
    except KeyError:
        pass
    else:
        _invalidate_star_import_cache_module(parser_cache_item.parser.module)


def load_parser(path):
    """
    Returns the module or None, if it fails.
    """
    # mtime of the source file, used to decide whether the cache is stale.
    p_time = os.path.getmtime(path) if path else None
    try:
        parser_cache_item = parser_cache[path]
        if not path or p_time <= parser_cache_item.change_time:
            return parser_cache_item.parser
        else:
            # In case there is already a module cached and this module
            # has to be reparsed, we also need to invalidate the import
            # caches.
            _invalidate_star_import_cache_module(parser_cache_item.parser.module)
    except KeyError:
        if settings.use_filesystem_cache:
            # Fall back to the on-disk pickle cache.
            return ParserPickling.load_parser(path, p_time)


def save_parser(path, parser, pickling=True):
    # Store the parser in the in-memory cache and (optionally) pickle it.
    try:
        p_time = None if path is None else os.path.getmtime(path)
    except OSError:
        p_time = None
        pickling = False

    item = ParserCacheItem(parser, p_time)
    parser_cache[path] = item
    if settings.use_filesystem_cache and pickling:
        ParserPickling.save_parser(path, item)


class ParserPickling(object):
    """On-disk (pickle) cache of parsed modules, with a JSON index file."""

    version = 24
    """
    Version number (integer) for file system cache.

    Increment this number when there are any incompatible changes in
    parser representation classes. For example, the following changes
    are regarded as incompatible.

    - Class name is changed.
    - Class is moved to another module.
    - Defined slot of the class is changed.
    """

    def __init__(self):
        self.__index = None
        self.py_tag = 'cpython-%s%s' % sys.version_info[:2]
        """
        Short name for distinguish Python implementations and versions.

        It's like `sys.implementation.cache_tag` but for Python < 3.3
        we generate something similar. See:
        http://docs.python.org/3/library/sys.html#sys.implementation

        .. todo:: Detect interpreter (e.g., PyPy).
        """

    def load_parser(self, path, original_changed_time):
        # Return the pickled parser for `path`, or None if missing/outdated.
        try:
            pickle_changed_time = self._index[path]
        except KeyError:
            return None
        if original_changed_time is not None \
                and pickle_changed_time < original_changed_time:
            # the pickle file is outdated
            return None

        with open(self._get_hashed_path(path), 'rb') as f:
            try:
                # Disabling the GC speeds up loading of large pickles.
                gc.disable()
                parser_cache_item = pickle.load(f)
            finally:
                gc.enable()

        debug.dbg('pickle loaded: %s', path)
        parser_cache[path] = parser_cache_item
        return parser_cache_item.parser

    def save_parser(self, path, parser_cache_item):
        # Invalidate the in-memory index so it is re-read before updating.
        self.__index = None
        try:
            files = self._index
        except KeyError:
            files = {}
            self._index = files

        with open(self._get_hashed_path(path), 'wb') as f:
            pickle.dump(parser_cache_item, f, pickle.HIGHEST_PROTOCOL)
            files[path] = parser_cache_item.change_time

        self._flush_index()

    @property
    def _index(self):
        # Lazily load <cache_dir>/index.json, validating the cache version.
        if self.__index is None:
            try:
                with open(self._get_path('index.json')) as f:
                    data = json.load(f)
            except (IOError, ValueError):
                self.__index = {}
            else:
                # 0 means version is not defined (= always delete cache):
                if data.get('version', 0) != self.version:
                    self.clear_cache()
                    self.__index = {}
                else:
                    self.__index = data['index']
        return self.__index

    def _remove_old_modules(self):
        # TODO use
        change = False
        if change:
            self._flush_index(self)
            self._index  # reload index

    def _flush_index(self):
        # Write the index to disk and drop the in-memory copy.
        data = {'version': self.version, 'index': self._index}
        with open(self._get_path('index.json'), 'w') as f:
            json.dump(data, f)
        self.__index = None

    def clear_cache(self):
        shutil.rmtree(self._cache_directory())

    def _get_hashed_path(self, path):
        # Pickle file names are the md5 of the source path.
        return self._get_path('%s.pkl' % hashlib.md5(path.encode("utf-8")).hexdigest())

    def _get_path(self, file):
        dir = self._cache_directory()
        if not os.path.exists(dir):
            os.makedirs(dir)
        return os.path.join(dir, file)

    def _cache_directory(self):
        return os.path.join(settings.cache_directory, self.py_tag)


# is a singleton
ParserPickling = ParserPickling()

# --- lib/jedi/evaluate/analysis.py --------------------------------------------
"""
Module for statical analysis.
"""
from jedi import debug
from jedi.parser import tree
from jedi.evaluate.compiled import CompiledObject


# error-name -> (numeric id, exception class, default message)
CODES = {
    'attribute-error': (1, AttributeError, 'Potential AttributeError.'),
    'name-error': (2, NameError, 'Potential NameError.'),
    'import-error': (3, ImportError, 'Potential ImportError.'),
    'type-error-generator': (4, TypeError, "TypeError: 'generator' object is not subscriptable."),
    'type-error-too-many-arguments': (5, TypeError, None),
    'type-error-too-few-arguments': (6, TypeError, None),
    'type-error-keyword-argument': (7, TypeError, None),
    'type-error-multiple-values': (8, TypeError, None),
    'type-error-star-star': (9, TypeError, None),
    'type-error-star': (10, TypeError, None),
    'type-error-operation': (11, TypeError, None),
}


class Error(object):
    """A potential error found by static analysis, located in a module."""
    def __init__(self, name, module_path, start_pos, message=None):
        self.path = module_path
        self._start_pos = start_pos
        self.name = name
        if message is None:
            # Fall back to the default message registered in CODES.
            message = CODES[self.name][2]
        self.message = message

    @property
    def line(self):
        return self._start_pos[0]

    @property
    def column(self):
        return self._start_pos[1]

    @property
    def code(self):
        # The class name start
        # e.g. "E1" for Error, "W1" for Warning.
        first = self.__class__.__name__[0]
        return first + str(CODES[self.name][0])

    def __unicode__(self):
        return '%s:%s:%s: %s %s' % (self.path, self.line, self.column,
                                    self.code, self.message)

    def __str__(self):
        return self.__unicode__()

    def __eq__(self, other):
        return (self.path == other.path and self.name == other.name
                and self._start_pos == other._start_pos)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hash((self.path, self._start_pos, self.name))

    def __repr__(self):
        return '<%s %s: %s@%s,%s>' % (self.__class__.__name__,
                                      self.name, self.path,
                                      self._start_pos[0], self._start_pos[1])


class Warning(Error):
    pass


def add(evaluator, name, jedi_obj, message=None, typ=Error, payload=None):
    # Record a potential error/warning on the evaluator, unless it would be
    # caught by a surrounding try/except (or an equivalent hasattr() guard).
    from jedi.evaluate.iterable import MergedNodes
    while isinstance(jedi_obj, MergedNodes):
        if len(jedi_obj) != 1:
            # TODO is this kosher?
            return
        jedi_obj = list(jedi_obj)[0]

    exception = CODES[name][1]
    if _check_for_exception_catch(evaluator, jedi_obj, exception, payload):
        return

    module_path = jedi_obj.get_parent_until().path
    instance = typ(name, module_path, jedi_obj.start_pos, message)
    debug.warning(str(instance))
    evaluator.analysis.append(instance)


def _check_for_setattr(instance):
    """
    Check if there's any setattr method inside an instance. If so, return True.
    """
    module = instance.get_parent_until()
    try:
        stmts = module.used_names['setattr']
    except KeyError:
        return False

    # Only setattr usages positioned inside the instance's scope count.
    return any(instance.start_pos < stmt.start_pos < instance.end_pos
               for stmt in stmts)


def add_attribute_error(evaluator, scope, name):
    message = ('AttributeError: %s has no attribute %s.' % (scope, name))
    from jedi.evaluate.representation import Instance
    # Check for __getattr__/__getattribute__ existance and issue a warning
    # instead of an error, if that happens.
    if isinstance(scope, Instance):
        typ = Warning
        try:
            scope.get_subscope_by_name('__getattr__')
        except KeyError:
            try:
                scope.get_subscope_by_name('__getattribute__')
            except KeyError:
                if not _check_for_setattr(scope):
                    typ = Error
    else:
        typ = Error

    payload = scope, name
    add(evaluator, 'attribute-error', name, message, typ, payload)


def _check_for_exception_catch(evaluator, jedi_obj, exception, payload=None):
    """
    Checks if a jedi object (e.g. `Statement`) sits inside a try/catch and
    doesn't count as an error (if equal to `exception`).
    Also checks `hasattr` for AttributeErrors and uses the `payload` to compare
    it.
    Returns True if the exception was catched.
    """
    def check_match(cls, exception):
        try:
            return isinstance(cls, CompiledObject) and issubclass(exception, cls.obj)
        except TypeError:
            return False

    def check_try_for_except(obj, exception):
        # Only nodes in try
        iterator = iter(obj.children)
        for branch_type in iterator:
            colon = next(iterator)
            suite = next(iterator)
            if branch_type == 'try' \
                    and not (branch_type.start_pos < jedi_obj.start_pos <= suite.end_pos):
                return False

        for node in obj.except_clauses():
            if node is None:
                return True  # An exception block that catches everything.
            else:
                except_classes = evaluator.eval_element(node)
                for cls in except_classes:
                    from jedi.evaluate import iterable
                    if isinstance(cls, iterable.Array) and cls.type == 'tuple':
                        # multiple exceptions
                        for c in cls.values():
                            if check_match(c, exception):
                                return True
                    else:
                        if check_match(cls, exception):
                            return True

    def check_hasattr(node, suite):
        # Recognise `if hasattr(obj, 'attr'):` guards via a chain of asserts;
        # any structural mismatch simply means "not a hasattr guard".
        try:
            assert suite.start_pos <= jedi_obj.start_pos < suite.end_pos
            assert node.type == 'power'
            base = node.children[0]
            assert base.type == 'name' and base.value == 'hasattr'
            trailer = node.children[1]
            assert trailer.type == 'trailer'
            arglist = trailer.children[1]
            assert arglist.type == 'arglist'
            from jedi.evaluate.param import Arguments
            args = list(Arguments(evaluator, arglist).unpack())
            # Arguments should be very simple
            assert len(args) == 2

            # Check name
            key, values = args[1]
            assert len(values) == 1
            names = evaluator.eval_element(values[0])
            assert len(names) == 1 and isinstance(names[0], CompiledObject)
            assert names[0].obj == str(payload[1])

            # Check objects
            key, values = args[0]
            assert len(values) == 1
            objects = evaluator.eval_element(values[0])
            return payload[0] in objects
        except AssertionError:
            return False

    # Walk up the tree until the enclosing function/class, looking for a
    # try/except or hasattr guard that would swallow `exception`.
    obj = jedi_obj
    while obj is not None and not obj.isinstance(tree.Function, tree.Class):
        if obj.isinstance(tree.Flow):
            # try/except catch check
            if obj.isinstance(tree.TryStmt) and check_try_for_except(obj, exception):
                return True
            # hasattr check
            if exception == AttributeError and obj.isinstance(tree.IfStmt, tree.WhileStmt):
                if check_hasattr(obj.children[1], obj.children[3]):
                    return True
        obj = obj.parent

    return False


def get_module_statements(module):
    """
    Returns the 
    statements used in a module. All these statements should be
    evaluated to check for potential exceptions.
    """
    def check_children(node):
        # Collect call-argument nodes found anywhere below `node`.
        try:
            children = node.children
        except AttributeError:
            return []
        else:
            nodes = []
            for child in children:
                nodes += check_children(child)
                if child.type == 'trailer':
                    c = child.children
                    # A call trailer `( ... )` with at least one argument.
                    if c[0] == '(' and c[1] != ')':
                        if c[1].type != 'arglist':
                            if c[1].type == 'argument':
                                nodes.append(c[1].children[-1])
                            else:
                                nodes.append(c[1])
                        else:
                            for argument in c[1].children:
                                if argument.type == 'argument':
                                    nodes.append(argument.children[-1])
                                elif argument.type != 'operator':
                                    nodes.append(argument)
            return nodes

    def add_nodes(nodes):
        # Flatten flow statements and suites into the set of leaf statements
        # worth evaluating.
        new = set()
        for node in nodes:
            if isinstance(node, tree.Flow):
                children = node.children
                if node.type == 'for_stmt':
                    children = children[2:]  # Don't want to include the names.
                # Pick the suite/simple_stmt.
                new |= add_nodes(children)
            elif node.type in ('simple_stmt', 'suite'):
                new |= add_nodes(node.children)
            elif node.type in ('return_stmt', 'yield_expr'):
                try:
                    new.add(node.children[1])
                except IndexError:
                    pass
            elif node.type not in ('whitespace', 'operator', 'keyword',
                                   'parameters', 'decorated', 'except_clause') \
                    and not isinstance(node, (tree.ClassOrFunc, tree.Import)):
                new.add(node)

                try:
                    children = node.children
                except AttributeError:
                    pass
                else:
                    # NOTE(review): this calls check_children(node) once per
                    # child and repeatedly re-adds `node` — looks redundant;
                    # confirm against upstream jedi before changing.
                    for next_node in children:
                        new.update(check_children(node))
                        if next_node.type != 'keyword' and node.type != 'expr_stmt':
                            new.add(node)
        return new

    nodes = set()
    import_names = set()
    decorated_funcs = []
    for scope in module.walk():
        for imp in set(scope.imports):
            import_names |= set(imp.get_defined_names())
            if imp.is_nested():
                import_names |= set(path[-1] for path in imp.paths())

        children = scope.children
        if isinstance(scope, tree.ClassOrFunc):
            children = children[2:]  # We don't want to include the class name.
        nodes |= add_nodes(children)

        for flow in scope.flows:
            if flow.type == 'for_stmt':
                nodes.add(flow.children[3])
            elif flow.type == 'try_stmt':
                nodes.update(e for e in flow.except_clauses() if e is not None)

        try:
            decorators = scope.get_decorators()
        except AttributeError:
            # Only functions/classes have decorators.
            pass
        else:
            if decorators:
                decorated_funcs.append(scope)
    return nodes, import_names, decorated_funcs

# --- lib/jedi/parser/tokenize.py ----------------------------------------------
# -*- coding: utf-8 -*-
"""
This tokenizer has been copied from the ``tokenize.py`` standard library
tokenizer.
The reason was simple: The standard library tokenizer fails
if the indentation is not right. The fast parser of jedi however requires
"wrong" indentation.

Basically this is a stripped down version of the standard library module, so
you can read the documentation there. Additionally we included some speed and
memory optimizations here.
"""
from __future__ import absolute_import

import string
import re
from io import StringIO
from jedi.parser.token import (tok_name, N_TOKENS, ENDMARKER, STRING, NUMBER,
                               NAME, OP, ERRORTOKEN, NEWLINE, INDENT, DEDENT)
from jedi._compatibility import is_py3


# PEP 263 source-encoding declaration, e.g. "# -*- coding: utf-8 -*-".
cookie_re = re.compile("coding[:=]\s*([-\w.]+)")


if is_py3:
    # Python 3 has str.isidentifier() to check if a char is a valid identifier
    is_identifier = str.isidentifier
else:
    namechars = string.ascii_letters + '_'
    is_identifier = lambda s: s in namechars


# Extra token type for comments, appended after the standard token ids.
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'


def group(*choices):
    """Join regex alternatives into one capturing group: ``(a|b|c)``."""
    return '(' + '|'.join(choices) + ')'


def maybe(*choices):
    """Like :func:`group`, but the whole group is optional: ``(a|b)?``."""
    return group(*choices) + '?'


# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
whitespace = r'[ \f\t]*'
comment = r'#[^\r\n]*'
name = r'\w+'

hex_number = r'0[xX][0-9a-fA-F]+'
bin_number = r'0[bB][01]+'
oct_number = r'0[oO][0-7]+'
dec_number = r'(?:0+|[1-9][0-9]*)'
int_number = group(hex_number, bin_number, oct_number, dec_number)
exponent = r'[eE][-+]?[0-9]+'
point_float = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(exponent)
Expfloat = r'[0-9]+' + exponent
float_number = group(point_float, Expfloat)
imag_number = group(r'[0-9]+[jJ]', float_number + r'[jJ]')
number = group(imag_number, float_number, int_number)

# Tail end of ' string.
single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
# Single-line ' or " string.

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

bracket = '[][(){}]'
special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
funny = group(operator, bracket, special)

# First (or only) line of ' or " string.
cont_str = group(r"[bBuU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                 group("'", r'\\\r?\n'),
                 r'[bBuU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                 group('"', r'\\\r?\n'))
pseudo_extras = group(r'\\\r?\n', comment, triple)
pseudo_token = group(whitespace) + \
    group(pseudo_extras, number, funny, cont_str, name)


def _compile(expr):
    """Compile *expr* with unicode matching enabled."""
    return re.compile(expr, re.UNICODE)


pseudoprog, single3prog, double3prog = map(
    _compile, (pseudo_token, single3, double3))

# Maps a string prefix+quote to the regex that matches the rest of the string.
# Bare prefix letters map to None (they are not complete string openers).
endprogs = {"'": _compile(single), '"': _compile(double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "b'''": single3prog, 'b"""': double3prog,
            "u'''": single3prog, 'u"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "B'''": single3prog, 'B"""': double3prog,
            "U'''": single3prog, 'U"""': double3prog,
            "br'''": single3prog, 'br"""': double3prog,
            "bR'''": single3prog, 'bR"""': double3prog,
            "Br'''": single3prog, 'Br"""': double3prog,
            "BR'''": single3prog, 'BR"""': double3prog,
            "ur'''": single3prog, 'ur"""': double3prog,
            "uR'''": single3prog, 'uR"""': double3prog,
            "Ur'''": single3prog, 'Ur"""': double3prog,
            "UR'''": single3prog, 'UR"""': double3prog,
            'r': None, 'R': None, 'b': None, 'B': None}

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "u'''", 'u"""', "U'''", 'U"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""',
          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
          "uR'''", 'uR"""', "UR'''", 'UR"""'):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "b'", 'b"', "B'", 'B"',
          "u'", 'u"', "U'", 'U"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"',
          "ur'", 'ur"', "Ur'", 'Ur"',
          "uR'", 'uR"', "UR'", 'UR"'):
    single_quoted[t] = t

del _compile

tabsize = 8

ALWAYS_BREAK_TOKENS = (';', 'import', 'from', 'class', 'def', 'try', 'except',
                       'finally', 'while', 'return')


def source_tokens(source):
    """Generate tokens from a the source code (string)."""
    source = source + '\n'  # end with \n, because the parser needs it
    readline = StringIO(source).readline
    return generate_tokens(readline)


def generate_tokens(readline):
    """
    A heavily modified Python standard library tokenizer.

    Additionally to the default information, yields also the prefix of each
    token. This idea comes from lib2to3. The prefix contains all information
    that is irrelevant for the parser like newlines in parentheses or comments.
    """
    paren_level = 0  # count parentheses
    indents = [0]
    lnum = 0
    numchars = '0123456789'
    contstr = ''
    contline = None
    # We start with a newline. This makes indent at the first position
    # possible.
It's not valid Python, but still better than an INDENT in the 173 | # second line (and not in the first). This makes quite a few things in 174 | # Jedi's fast parser possible. 175 | new_line = True 176 | prefix = '' # Should never be required, but here for safety 177 | additional_prefix = '' 178 | while True: # loop over lines in stream 179 | line = readline() # readline returns empty when finished. See StringIO 180 | if not line: 181 | if contstr: 182 | yield ERRORTOKEN, contstr, contstr_start, prefix 183 | break 184 | 185 | lnum += 1 186 | pos, max = 0, len(line) 187 | 188 | if contstr: # continued string 189 | endmatch = endprog.match(line) 190 | if endmatch: 191 | pos = endmatch.end(0) 192 | yield STRING, contstr + line[:pos], contstr_start, prefix 193 | contstr = '' 194 | contline = None 195 | else: 196 | contstr = contstr + line 197 | contline = contline + line 198 | continue 199 | 200 | while pos < max: 201 | pseudomatch = pseudoprog.match(line, pos) 202 | if not pseudomatch: # scan for tokens 203 | txt = line[pos] 204 | if line[pos] in '"\'': 205 | # If a literal starts but doesn't end the whole rest of the 206 | # line is an error token. 207 | txt = line[pos:] 208 | yield ERRORTOKEN, txt, (lnum, pos), prefix 209 | pos += 1 210 | continue 211 | 212 | prefix = additional_prefix + pseudomatch.group(1) 213 | additional_prefix = '' 214 | start, pos = pseudomatch.span(2) 215 | spos = (lnum, start) 216 | token, initial = line[start:pos], line[start] 217 | 218 | if new_line and initial not in '\r\n#': 219 | new_line = False 220 | if paren_level == 0: 221 | if start > indents[-1]: 222 | yield INDENT, '', spos, '' 223 | indents.append(start) 224 | while start < indents[-1]: 225 | yield DEDENT, '', spos, '' 226 | indents.pop() 227 | 228 | if (initial in numchars or # ordinary number 229 | (initial == '.' and token != '.' 
and token != '...')): 230 | yield NUMBER, token, spos, prefix 231 | elif initial in '\r\n': 232 | if not new_line and paren_level == 0: 233 | yield NEWLINE, token, spos, prefix 234 | else: 235 | additional_prefix = prefix + token 236 | new_line = True 237 | elif initial == '#': # Comments 238 | assert not token.endswith("\n") 239 | additional_prefix = prefix + token 240 | elif token in triple_quoted: 241 | endprog = endprogs[token] 242 | endmatch = endprog.match(line, pos) 243 | if endmatch: # all on one line 244 | pos = endmatch.end(0) 245 | token = line[start:pos] 246 | yield STRING, token, spos, prefix 247 | else: 248 | contstr_start = (lnum, start) # multiple lines 249 | contstr = line[start:] 250 | contline = line 251 | break 252 | elif initial in single_quoted or \ 253 | token[:2] in single_quoted or \ 254 | token[:3] in single_quoted: 255 | if token[-1] == '\n': # continued string 256 | contstr_start = lnum, start 257 | endprog = (endprogs.get(initial) or endprogs.get(token[1]) 258 | or endprogs.get(token[2])) 259 | contstr = line[start:] 260 | contline = line 261 | break 262 | else: # ordinary string 263 | yield STRING, token, spos, prefix 264 | elif is_identifier(initial): # ordinary name 265 | if token in ALWAYS_BREAK_TOKENS: 266 | paren_level = 0 267 | while True: 268 | indent = indents.pop() 269 | if indent > start: 270 | yield DEDENT, '', spos, '' 271 | else: 272 | indents.append(indent) 273 | break 274 | yield NAME, token, spos, prefix 275 | elif initial == '\\' and line[start:] in ('\\\n', '\\\r\n'): # continued stmt 276 | additional_prefix += prefix + line[start:] 277 | break 278 | else: 279 | if token in '([{': 280 | paren_level += 1 281 | elif token in ')]}': 282 | paren_level -= 1 283 | yield OP, token, spos, prefix 284 | 285 | end_pos = (lnum, max - 1) 286 | # As the last position we just take the maximally possible position. We 287 | # remove -1 for the last new line. 
288 | for indent in indents[1:]: 289 | yield DEDENT, '', end_pos, '' 290 | yield ENDMARKER, '', end_pos, prefix 291 | --------------------------------------------------------------------------------