diff --git a/examples/example_binary.py b/examples/example_binary.py index 3cad107..6af66ff 100644 --- a/examples/example_binary.py +++ b/examples/example_binary.py @@ -26,7 +26,6 @@ import sys from refex import cli -from refex import formatting from refex import search from refex.python import syntactic_template from refex.python.matchers import syntax_matchers diff --git a/examples/example_custom_matcher.py b/examples/example_custom_matcher.py index 1db06d4..d314ca3 100644 --- a/examples/example_custom_matcher.py +++ b/examples/example_custom_matcher.py @@ -23,8 +23,6 @@ from refex.python import matcher from refex.python.matchers import base_matchers -from refex import search -from refex import match @attr.s(frozen=True) diff --git a/examples/test_example_custom_matcher.py b/examples/test_example_custom_matcher.py index 37a4381..4620a1c 100644 --- a/examples/test_example_custom_matcher.py +++ b/examples/test_example_custom_matcher.py @@ -13,9 +13,10 @@ # limitations under the License. """Tests for refex.examples.example_custom_matcher.""" -from refex.examples import example_custom_matcher from absl.testing import absltest + from refex import search +from refex.examples import example_custom_matcher from refex.python import syntactic_template diff --git a/pyproject.toml b/pyproject.toml index 2be69e6..0626970 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,13 +43,6 @@ docs = ["m2r", "sphinx"] [tool.isort] profile = "google" -# Upstream version within Google actually uses contextlib2 and mock for Python 2 -# compatibility, and this is transformed away by copybara. -# Adding contextlib2 to the stdlib makes the sorting correct in the canonical -# github version even when run on the upstream version pre-copybara. Adding -# mock... doesn't help as much (sorts as "mock", not "unittest"). -# These can both go away starting in 2021. 
-extra_standard_library = ["contextlib2", "mock"] # https://tox.readthedocs.io/ [tool.tox] diff --git a/refex/.pylint.rc b/refex/.pylint.rc new file mode 100644 index 0000000..b482355 --- /dev/null +++ b/refex/.pylint.rc @@ -0,0 +1,669 @@ +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code +extension-pkg-whitelist= + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Arbitrary Python code to execute before linting. +#init-hook= + +# Use multiple processes to speed up Pylint. +jobs=1 + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=no + +# Use a custom configuration file for linting. +#rcfile= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once).You can also use "--disable=all" to +# disable everything first and then reenable specific checks. 
For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use"--disable=all --enable=classes +# --disable=W" +disable=misplaced-comparison-constant, + trailing-newlines, + multiple-imports, + wrong-import-order, + ungrouped-imports, + wrong-import-position, + g-no-augmented-assignment, + bad-option-value, + unsubscriptable-object, + print-statement, + parameter-unpacking, + unpacking-in-except, + backtick, + long-suffix, + old-ne-operator, + old-octal-literal, + import-star-module-level, + non-ascii-bytes-literal, + locally-disabled, + locally-enabled, + file-ignored, + suppressed-message, + useless-suppression, + c-extension-no-member, + no-self-use, + duplicate-code, + too-many-ancestors, + too-many-instance-attributes, + too-few-public-methods, + too-many-public-methods, + too-many-return-statements, + too-many-branches, + too-many-arguments, + too-many-locals, + too-many-statements, + too-many-boolean-expressions, + attribute-defined-outside-init, + fixme, + global-statement, + implicit-str-concat-in-sequence, + apply-builtin, + basestring-builtin, + buffer-builtin, + cmp-builtin, + coerce-builtin, + execfile-builtin, + file-builtin, + long-builtin, + raw_input-builtin, + reduce-builtin, + standarderror-builtin, + unicode-builtin, + xrange-builtin, + coerce-method, + delslice-method, + getslice-method, + setslice-method, + no-absolute-import, + old-division, + dict-iter-method, + dict-view-method, + next-method-called, + metaclass-assignment, + raising-string, + reload-builtin, + oct-method, + hex-method, + nonzero-method, + cmp-method, + input-builtin, + round-builtin, + intern-builtin, + unichr-builtin, + map-builtin-not-iterating, + zip-builtin-not-iterating, + range-builtin-not-iterating, + filter-builtin-not-iterating, + using-cmp-argument, + eq-without-hash, + div-method, + idiv-method, + rdiv-method, 
+ exception-message-attribute, + invalid-str-codec, + sys-max-int, + bad-python3-import, + deprecated-string-function, + deprecated-str-translate-call, + deprecated-itertools-function, + deprecated-types-field, + next-method-defined, + dict-items-not-iterating, + dict-keys-not-iterating, + dict-values-not-iterating + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable=old-raise-syntax, + indexing-exception + + +[PATHS] + +# Directories to add to sys.path. +#import-paths= + +# Inject some known modules. +inject-known-modules=no + +# The import path resolver +resolver=blaze + + +[REPORTS] + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). +evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# String to print as the module footer. +#module-footer-template= + +# Template for the module header. %(filename)s will be replaced with the name +# of the file under analysis. +#module-header-template= + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details +msg-template={msg_id}:{line:3}{obj_prefix}{obj}: {msg}{sym_separator}[{symbol}] +http://go/gpylint-faq#{symbol} + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio).You can also give a reporter class, eg +# mypackage.mymodule.MyReporterClass. 
+output-format=sorted-text + +# Tells whether to display a full report or only the messages +reports=no + +# Activate the evaluation score. +score=yes + + +[MODES] + +# DEPRECATED. +disable-docstring=no + +# DEPRECATED, use --mode=base +google=no + +# The configuration modes to activate (default: base). +mode=base + +# Suppress un-pyformat-able line-too-long warnings. +pyformat-filter-line-too-long=yes + +# The mode to use when import path setup fails (default: style). +safe-mode=style + +# DEPRECATED, use --mode=style +single-file=no + +# DEPRECATED, use --mode=test +test=no + +# A pattern for file names that should activate test mode. +test-filename-pattern=_(unit|reg)?test\.py$ + +# The configuration mode to use for tests (default: test). +test-mode=test + + +[GOOGLE IMPORTS] + +# List of modules that should be ignored if unused. +ignore-unused-imports=google3 + +# Apply g-importing-members checks to stdlib imports only +importing-members-check-stdlib-only=no + + +[ASTROID LOADER] + +# ensure external imports will not be made +standalone=no + + +[BASIC] + +# Naming style matching correct argument names +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style +argument-rgx=^[a-z][a-z0-9_]*$ + +# Naming style matching correct attribute names +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style +attr-rgx=^_{0,2}[a-z][a-z0-9_]*$ + +# Bad variable names which should always be refused, separated by a comma +bad-names= + +# Naming style matching correct class attribute names +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style +class-attribute-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$ + +# Naming style matching correct class names +class-naming-style=PascalCase + +# Regular expression matching correct class names. 
Overrides class-naming-style +class-rgx=^_?[A-Z][a-zA-Z0-9]*$ + +# Naming style matching correct constant names +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style +const-rgx=^(_?[A-Z][A-Z0-9_]*|__[a-z0-9_]+__|_?[a-z][a-z0-9_]*)$ + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=10 + +# Naming style matching correct function names +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style +function-rgx=^(?:(?PsetUp|tearDown|setUpModule|tearDownModule)|(?P_?[A-Z][a-zA-Z0-9]*)|(?P_?[a-z][a-z0-9_]*))$ + +# Good variable names which should always be accepted, separated by a comma +good-names=main, + _ + +# Include a hint for the correct naming format with invalid-name +include-naming-hint=no + +# Naming style matching correct inline iteration names +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style +inlinevar-rgx=^[a-z][a-z0-9_]*$ + +# Naming style matching correct method names +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style +method-rgx=(?x)^(?:(?P_[a-z0-9_]+__|runTest|setUp|tearDown|setUpTestCase|tearDownTestCase|setupSelf|tearDownClass|setUpClass|(test|assert)_*[A-Z0-9][a-zA-Z0-9_]*|next)|(?P_{0,2}[A-Z][a-zA-Z0-9_]*)|(?P_{0,2}[a-z][a-z0-9_]*))$ + +# Naming style matching correct module names +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style +module-rgx=^(_?[a-z][a-z0-9_]*)|__init__|PRESUBMIT|PRESUBMIT_unittest$ + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. 
+name-group=function:method + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=(__.*__|main) + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +property-classes=abc.abstractproperty,google3.pyglib.function_utils.cached.property,cached_property.cached_property,cached_property.threaded_cached_property,cached_property.cached_property_with_ttl,cached_property.threaded_cached_property_with_ttl + +# Naming style matching correct variable names +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style +variable-rgx=^[a-z][a-z0-9_]*$ + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=(?x) +(\$Id:\s\/\/depot\/.+#\d+\s\$ +|^\s*\#\ LINT\.ThenChange +) + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=80 + +# Maximum number of lines in a module +max-module-lines=99999 + +# List of optional constructs for which whitespace checking is disabled. `dict- +# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. +# `trailing-comma` allows a space between comma and closing bracket: (a, ). +# `empty-line` allows space-only lines. +no-space-check= + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. 
+single-line-if-stmt=yes + + +[LOGGING] + +# Logging modules to check that the string format arguments are in logging +# function parameter format +logging-modules=logging,absl.logging,google3.pyglib.logging,google3.video.youtube.src.python.util.slogging,tensorflow.google.logging + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[STRING_CONSTANT] + +# This flag controls whether the implicit-str-concat-in-sequence should +# generate a warning if the string is surrounded by parentheses. +allow-parenthesized-str-concat-in-sequences=no + +# This flag controls whether the implicit-str-concat-in-sequence should +# generate a warning on implicit string concatenation in sequences defined over +# several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager,contextlib2.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. 
In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid to define new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expectedly +# not used). +dummy-variables-rgx=^\*{0,2}(_$|unused_|dummy_) + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. 
+init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six,six.moves,past.builtins,future.builtins,functools + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes= + + +[GOOGLE AST] + +# List of module members that should be marked as deprecated. +deprecated-members=string.atof,string.atoi,string.atol,string.capitalize,string.expandtabs,string.find,string.rfind,string.index,string.rindex,string.count,string.lower,string.split,string.rsplit,string.splitfields,string.join,string.joinfields,string.lstrip,string.rstrip,string.strip,string.swapcase,string.translate,string.upper,string.ljust,string.rjust,string.center,string.zfill,string.replace,sys.exitfunc,sys.maxint,tensorflow.contrib + +# Maximum number of lines for the body of a lambda +short-func-length=1 + +# Maximum number of lines over which a conditional expression may be spread +short-if-length=3 + + +[GOOGLE DOCSTRING] + +# List of exceptions that do not need to be mentioned in the Raises section of +# a docstring. +ignore-exceptions=AssertionError, + AttributeError, + NotImplementedError, + StopIteration, + TypeError, + ValueError + +# List of class names (like "ndb.Model") that should be considered equivalent +# to "object" for the purpose of attribute checking +known-base-classes= + +# Always require an Attributes section for known objects. +require-docstring-attributes=no + + +[GOOGLE DEPRECATED ASSERT] + +# List of deprecated test assertions that should be allowed. +allowed-methods= + + +[GENERICASSERTCHECKER] + +# List of test assertions that should not be suggested as replacements. +suppress-suggesting-methods= + + +[GOOGLE LINES] + +# Regexp for a proper copyright notice. +copyright=Copyright \d{4} Google Inc\. +All [Rr]ights [Rr]eserved\. + + +[GOOGLE TOKENS] + +# A regex for finding comments that do not have a space between leading comment +# separators and textual content. 
+comment-starts-without-space=\A#[^\s\w]*\w + +# Regexp for a proper TODO comment; the uid group, if any, should match the +# user ID of the relevant person +good-todo=# ?TODO\((?P[a-z][a-z0-9-]*)|b/(?P[0-9]+)\):? + +# Minimum number of spaces between the end of a line and an inline comment. +min-comment-space=2 + +# Regexp for a TODO comment, which may be incorrect. +todo=(?i)#\s*todo\b + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls, + class_ + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# Maximum number of arguments for function / method +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in a if statement +max-bool-expr=5 + +# Maximum number of branch for function / method body +max-branches=12 + +# Maximum number of locals for function / method body +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body +max-returns=6 + +# Maximum number of statements in function / method body +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[IMPORTS] + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. 
This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma +deprecated-modules=regsub, + TERMIOS, + Bastion, + rexec, + sets + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled) +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled) +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled) +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "Exception" +overgeneral-exceptions=StandardError, + Exception, + BaseException diff --git a/refex/fix/find_fixer.py b/refex/fix/find_fixer.py index 2708ae1..0dfc744 100644 --- a/refex/fix/find_fixer.py +++ b/refex/fix/find_fixer.py @@ -56,7 +56,7 @@ def _register_builtins(): _register_builtins() -def from_pattern(fixer_pattern): +def from_pattern(fixer_pattern: str) -> fixer.CombiningPythonFixer: """Provide a fixer that combines all the fixers specified in `fixer_pattern`. To get all the default fixers, pass '*'. 
Otherwise, to get a group of fixers diff --git a/refex/fix/fixer.py b/refex/fix/fixer.py index 5297a1c..7f81a0b 100644 --- a/refex/fix/fixer.py +++ b/refex/fix/fixer.py @@ -19,17 +19,14 @@ from __future__ import print_function import abc -import operator -from typing import Callable, List, Mapping, Optional, Text, Union, TypeVar +import string +from typing import Callable, List, Mapping, Optional, Text, TypeVar, Union import attr import cached_property -import six from refex import formatting -from refex import future_string from refex import search -from refex import substitution from refex.python import matcher from refex.python.matchers import base_matchers from refex.python.matchers import syntax_matchers @@ -62,7 +59,7 @@ class CombiningPythonFixer(search.FileRegexFilteredSearcher, ``AnyOf``, allowing for optimized traversal. """ fixers = attr.ib(type=List[PythonFixer]) - include_regex = attr.ib(default=r'.*[.]py$') + include_regex = attr.ib(default=r'.*[.]py$', type=str) @fixers.validator def _fixers_validator(self, attribute, fixers): @@ -73,10 +70,10 @@ def _fixers_validator(self, attribute, fixers): ) # Override _matcher definition, as it's now computed based on fixers. - _matcher = attr.ib(init=False) + matcher = attr.ib(init=False, type=matcher.Matcher) - @_matcher.default - def _matcher_default(self): + @matcher.default + def matcher_default(self): return base_matchers.AnyOf( *(fixer.matcher_with_meta for fixer in self.fixers)) @@ -104,15 +101,15 @@ class SimplePythonFixer(PythonFixer): corresponding example_replacement is as well. significant: Whether the suggestions are going to be significant. 
""" - _matcher = attr.ib() # type: matcher.Matcher - _replacement = attr.ib( - ) # type: Union[formatting.Template, Mapping[Text, formatting.Template]] - _message = attr.ib(default=None) # type: Optional[six.text_type] - _url = attr.ib(default=None) # type: Optional[six.text_type] - _category = attr.ib(default=None) # type: Text - _example_fragment = attr.ib(default=None) # type: Optional[Text] - _example_replacement = attr.ib(default=None) # type: Optional[Text] - _significant = attr.ib(default=True) # type: bool + _matcher = attr.ib(type=matcher.Matcher) + _replacement = attr.ib(type=Union[formatting.Template, + Mapping[Text, formatting.Template]]) + _message = attr.ib(default=None, type=Optional[str]) + _url = attr.ib(default=None, type=Optional[str]) + _category = attr.ib(default=None, type=str) + _example_fragment = attr.ib(default=None, type=Optional[str]) + _example_replacement = attr.ib(default=None, type=Optional[str]) + _significant = attr.ib(default=True, type=bool) @cached_property.cached_property def matcher_with_meta(self): @@ -146,7 +143,7 @@ def example_fragment(self): return None if self._matcher.restrictions: return None - return future_string.Template(self._matcher.pattern).substitute( + return string.Template(self._matcher.pattern).substitute( ImmutableDefaultDict(lambda k: k)) def example_replacement(self): @@ -159,7 +156,7 @@ def example_replacement(self): raise TypeError( 'Cannot autogenerate an example replacement unless the replacement' ' template applies to the whole match.') - return future_string.Template(self._replacement.template).substitute( + return string.Template(self._replacement.template).substitute( ImmutableDefaultDict(lambda k: k)) @@ -170,10 +167,7 @@ def example_replacement(self): @attr.s(frozen=True) class ImmutableDefaultDict(Mapping[KeyType, ValueType]): """Immutable mapping that returns factory(key) as a value, always.""" - - # TODO: Callable[[KeyType], ValueType] - # It isn't supported yet in pytype. 
:( - _factory = attr.ib() # type: Callable + _factory = attr.ib(type=Callable[[KeyType], ValueType]) def __getitem__(self, key: KeyType) -> ValueType: return self._factory(key) diff --git a/refex/fix/fixers/idiom_fixers.py b/refex/fix/fixers/idiom_fixers.py index d36f4a1..c47f6a5 100644 --- a/refex/fix/fixers/idiom_fixers.py +++ b/refex/fix/fixers/idiom_fixers.py @@ -21,10 +21,10 @@ from __future__ import print_function from __future__ import unicode_literals # for convenience +import string import textwrap from refex import formatting -from refex import future_string from refex.fix import fixer from refex.python import matcher as matcher_ from refex.python import syntactic_template @@ -60,8 +60,8 @@ def idiom_fixer( dotdotdot = fixer.ImmutableDefaultDict(lambda _: '...') return fixer.SimplePythonFixer( message=('This could be more Pythonic: %s -> %s.' % - ((future_string.Template(old_expr).substitute(dotdotdot), - future_string.Template(new_expr).substitute(dotdotdot)))), + ((string.Template(old_expr).substitute(dotdotdot), + string.Template(new_expr).substitute(dotdotdot)))), matcher=syntax_matchers.ExprPattern(old_expr), replacement=syntactic_template.PythonExprTemplate(new_expr), url=url, diff --git a/refex/fix/fixers/test_modern_python_fixers.py b/refex/fix/fixers/test_modern_python_fixers.py index ff3c335..68062fe 100644 --- a/refex/fix/fixers/test_modern_python_fixers.py +++ b/refex/fix/fixers/test_modern_python_fixers.py @@ -21,8 +21,8 @@ from absl.testing import parameterized from refex import search -from refex.fix.fixers import modern_python_fixers from refex.fix import fixer +from refex.fix.fixers import modern_python_fixers def _rewrite(fx, source): diff --git a/refex/fix/fixers/unittest_fixers.py b/refex/fix/fixers/unittest_fixers.py index 8b0a8b2..2096f53 100644 --- a/refex/fix/fixers/unittest_fixers.py +++ b/refex/fix/fixers/unittest_fixers.py @@ -25,7 +25,8 @@ from __future__ import print_function from __future__ import unicode_literals # for 
convenience -from refex import future_string +import string + from refex.fix import fixer from refex.python import syntactic_template from refex.python.matchers import syntax_matchers @@ -45,8 +46,8 @@ def assert_alias_fixer(old_expr, new_expr): return fixer.SimplePythonFixer( message=('{old} is a deprecated alias for {new} in the unittest module.' .format( - old=future_string.Template(old_expr).substitute(dotdotdot), - new=future_string.Template(new_expr).substitute(dotdotdot))), + old=string.Template(old_expr).substitute(dotdotdot), + new=string.Template(new_expr).substitute(dotdotdot))), matcher=syntax_matchers.ExprPattern(old_expr), replacement=syntactic_template.PythonExprTemplate(new_expr), url='https://docs.python.org/3/library/unittest.html#deprecated-aliases', @@ -74,8 +75,8 @@ def assert_message_fixer(old_expr, new_expr, method): dotdotdot = fixer.ImmutableDefaultDict(lambda _: '...') return fixer.SimplePythonFixer( message=('%s will give more detailed error information than %s.' % - (future_string.Template(new_expr).substitute(dotdotdot), - future_string.Template(old_expr).substitute(dotdotdot))), + (string.Template(new_expr).substitute(dotdotdot), + string.Template(old_expr).substitute(dotdotdot))), matcher=syntax_matchers.ExprPattern(old_expr), replacement=syntactic_template.PythonExprTemplate(new_expr), url=( diff --git a/refex/fix/test_fixer.py b/refex/fix/test_fixer.py index 598ab3a..e4055d4 100644 --- a/refex/fix/test_fixer.py +++ b/refex/fix/test_fixer.py @@ -18,15 +18,14 @@ from __future__ import print_function from __future__ import unicode_literals # for convenience. 
+import string from unittest import mock -import re from absl.testing import absltest from absl.testing import parameterized import attr from refex import formatting -from refex import future_string from refex import search from refex import substitution from refex.fix import find_fixer @@ -319,7 +318,7 @@ class ImmutableDefaultDictTest(absltest.TestCase): def test_replacement(self): self.assertEqual( - future_string.Template('$a == $b').substitute( + string.Template('$a == $b').substitute( fixer.ImmutableDefaultDict(lambda k: k)), 'a == b') def test_len(self): diff --git a/refex/formatting.py b/refex/formatting.py index 0f8fd88..acf106a 100644 --- a/refex/formatting.py +++ b/refex/formatting.py @@ -76,18 +76,15 @@ import itertools import operator import sre_parse -import subprocess -import sys -import tempfile -import typing -from typing import Any, Iterable, Mapping, Optional, Set, Text, Tuple +import string +from typing import (Any, Dict, Iterable, Iterator, Mapping, Optional, Set, Text, + Tuple) import attr import cached_property import colorama import six -from refex import future_string from refex import match as _match from refex import parsed_file from refex import substitution @@ -103,7 +100,7 @@ # TODO: Move this onto the Substitution as a "context" span. -def line_expanded_span(s, start, end): +def line_expanded_span(s: str, start: int, end: int) -> Tuple[int, int]: """Expands a slice of a string to the edges of the lines it overlaps. The start is moved left until it takes place after the preceding newline, @@ -305,10 +302,12 @@ class Renderer(object): :meth:`render()`. color: Whether to style and colorize human-readable output or not. 
""" - _match_format = attr.ib(default='{head}{match}{tail}') - color = attr.ib(default=True) - _label_to_style = attr.ib(factory={frozenset(): ''}.copy, init=False) - _styles = attr.ib(default=itertools.cycle(_DEFAULT_STYLES), init=False) + _match_format = attr.ib(default='{head}{match}{tail}', type=str) + color = attr.ib(default=True, type=bool) + _label_to_style = attr.ib( + factory={frozenset(): ''}.copy, init=False, type=Dict[Set, str]) + _styles = attr.ib( + default=itertools.cycle(_DEFAULT_STYLES), init=False, type=Iterator[str]) def render( self, @@ -533,7 +532,7 @@ class LiteralTemplate(Template): """A no-op template which does no substitution at all.""" #: The source template. - template = attr.ib(type=Text) + template = attr.ib(type=str) variables = frozenset() def substitute_match(self, parsed, match, matches): @@ -549,13 +548,13 @@ class ShTemplate(Template): """ #: The source template. - template = attr.ib(type=Text) + template = attr.ib(type=str) - _template = attr.ib(repr=False, init=False) + _template = attr.ib(repr=False, init=False, type=string.Template) @_template.default def _template_default(self): - return future_string.Template(self.template) + return string.Template(self.template) def substitute_match(self, parsed, match, matches): del match # unused diff --git a/refex/future_string.py b/refex/future_string.py deleted file mode 100644 index 920dd72..0000000 --- a/refex/future_string.py +++ /dev/null @@ -1,92 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -r"""Text-only string.Template wrapper for 2/3 straddling code. - -In Python 2, string.Template is bytes-only. In Python 3, it is text-only. This -presents a migration hazard. - -One way out: string.Template "technically" works for unicode in the basic case: - - $ python2 - >>> import string - >>> string.Template(u'$x').substitute({u'x': u'\uffef'}) - u'\uffef' - -But there are hairy edge cases: - - >>> class A(object): - ... def __unicode__(self): return u'unicode' - ... def __str__(self): return b'bytes' - ... - >>> string.Template(u'$x').substitute({u'x': A()}) - u'bytes' - -In addition, pytype and mypy will claim that Python 2 string.Template doesn't -support unicode at _all_, resulting in errors at build time. - -future_string.Template wraps string.Template in a way that eliminates the hairy -edge cases, and satisfies the type checkers: - - >>> from refex import future_string - >>> future_string.Template(u'$x').substitute({u'x': A()}) - u'unicode' -""" -from __future__ import absolute_import -from __future__ import division -# from __future__ import google_type_annotations -from __future__ import print_function - -import collections -import string -from typing import Any, Mapping, Text - -import six - -# For refex_doctest.py -# The examples are specific to Python 2. -DOCTEST_RUN = six.PY2 - - -class Template(string.Template): - """A text-only string.Template subclass. - - This doesn't support the full API of string.Template, but enough to get by. - (In particular, the substitute methods don't accept **kwargs.) - """ - - def __init__(self, template: Text): - super(Template, self).__init__(template) - self.template = template # override the type of .template. 
- - def substitute(self, variables: Mapping[Text, Any]) -> Text: - return super(Template, self).substitute(_LazyTextDict(variables)) - - def safe_substitute(self, variables: Mapping[Text, Any]) -> Text: - return super(Template, self).safe_substitute(_LazyTextDict(variables)) - - -class _LazyTextDict(collections.Mapping): - """A dict wrapper which converts values to text when items are accessed.""" - - def __init__(self, d: Mapping[Text, Any]): - self._d = d - - def __getitem__(self, key: Text) -> Text: - return six.text_type(self._d[key]) - - def __len__(self): - return len(self._d) - - def __iter__(self): - return iter(self._d) diff --git a/refex/match.py b/refex/match.py index 5179349..fc8477a 100644 --- a/refex/match.py +++ b/refex/match.py @@ -81,8 +81,9 @@ class StringMatch(Match): """A match which can be a source for substitution. .. attribute:: string - .. attribute:: span""" - string = attr.ib() + .. attribute:: span + """ + string = attr.ib(type=str) @attr.s(frozen=True) @@ -90,8 +91,9 @@ class SpanMatch(StringMatch): """A match which can be both a source *and* destination for substitution. .. attribute:: string - .. attribute:: span""" - span = attr.ib() + .. attribute:: span + """ + span = attr.ib(type=Tuple[int, int]) @classmethod def from_text(cls, text: str, span: Tuple[int, int]) -> "SpanMatch": @@ -108,4 +110,4 @@ class ObjectMatch(Match): .. attribute:: span """ #: An object associated with the match. - matched = attr.ib() # type: Any + matched = attr.ib(type=Any) diff --git a/refex/parsed_file.py b/refex/parsed_file.py index f93777f..3a4e9e7 100644 --- a/refex/parsed_file.py +++ b/refex/parsed_file.py @@ -25,7 +25,7 @@ from __future__ import unicode_literals import re -from typing import Iterable, Mapping, Optional, Text +from typing import Iterable, Mapping, Optional import asttokens import attr @@ -50,7 +50,7 @@ class ParsedFile(object): line_numbers: A cache for line number <-> codepoint offset conversion. 
""" - text = attr.ib(type=Text) + text = attr.ib(type=str) path = attr.ib(type=str) pragmas = attr.ib(type=Iterable["Pragma"]) @@ -87,8 +87,8 @@ class Pragma(object): start: The start (codepoint offset) of the pragma in the file. Inclusive. end: The end (codepoint offset) of the pragma in the file. Exclusive. """ - tag = attr.ib(type=Text) - data = attr.ib(type=Mapping[Text, Text]) + tag = attr.ib(type=str) + data = attr.ib(type=Mapping[str, str]) start = attr.ib(type=int) end = attr.ib(type=int) diff --git a/refex/python/matcher.py b/refex/python/matcher.py index c15437a..ab01f05 100644 --- a/refex/python/matcher.py +++ b/refex/python/matcher.py @@ -86,7 +86,7 @@ import functools import sys import tokenize -from typing import Any, Dict, Iterator, Optional, Text +from typing import Any, Dict, Iterator, Optional, Text, Union import weakref from absl import logging @@ -147,8 +147,7 @@ def register_enum(cls): return cls -def register_constant(name, constant): - # type: (Text, Any) -> None +def register_constant(name: str, constant: Any): """Registers a constant for use in evaluate.py.""" if name in registered_constants: raise AssertionError('Two conflicting constants: %r, %r' % constant, @@ -176,12 +175,13 @@ def _coerce_list(values): return [coerce(v) for v in values] +# TODO(b/199577701): drop the **kwargs: Any in the *_attrib functions. + _IS_SUBMATCHER_ATTRIB = __name__ + '._IS_SUBMATCHER_ATTRIB' _IS_SUBMATCHER_LIST_ATTRIB = __name__ + '._IS_SUBMATCHER_LIST_ATTRIB' -def submatcher_attrib( - *args, **kwargs): # TODO: make walk a kwarg when Py2 support is dropped. +def submatcher_attrib(*args, walk: bool = True, **kwargs: Any): """Creates an attr.ib that is marked as a submatcher. 
This will cause the matcher to be automatically walked as part of the @@ -196,14 +196,13 @@ def submatcher_attrib( Returns: An attr.ib() """ - if kwargs.pop('walk', True): + if walk: kwargs.setdefault('metadata', {})[_IS_SUBMATCHER_ATTRIB] = True kwargs.setdefault('converter', coerce) return attr.ib(*args, **kwargs) -def submatcher_list_attrib( - *args, **kwargs): # TODO: make walk a kwarg when Py2 support is dropped. +def submatcher_list_attrib(*args, walk: bool = True, **kwargs: Any): """Creates an attr.ib that is marked as an iterable of submatchers. This will cause the matcher to be automatically walked as part of the @@ -218,7 +217,7 @@ def submatcher_list_attrib( Returns: An attr.ib() """ - if kwargs.pop('walk', True): + if walk: kwargs.setdefault('metadata', {})[_IS_SUBMATCHER_LIST_ATTRIB] = True kwargs.setdefault('converter', _coerce_list) return attr.ib(*args, **kwargs) @@ -294,10 +293,15 @@ def span(self): class LexicalASTMatch(match.ObjectMatch, LexicalMatch): """AST match with adjustable start/end tokens.""" # Override for better type checking. - matched = None # type: ast.AST + matched: ast.AST = None + + +# TODO: describe create_match with overloads for more precise type checking. -def create_match(parsed, matched): +def create_match( + parsed: PythonParsedFile, matched: Any +) -> Union[LexicalASTMatch, match.StringMatch, match.ObjectMatch]: """Construct the most precise match for an object. This does a type check on `matched` to see if it has lexical information, but @@ -323,7 +327,7 @@ def create_match(parsed, matched): return match.ObjectMatch(matched) -def _is_lexical_match(matched): +def _is_lexical_match(matched: Any) -> bool: """Returns whether the match can be a lexical one. Its not well documented what ast objects return token information and @@ -336,7 +340,6 @@ def _is_lexical_match(matched): Returns: whether it can be a LexicalASTMatch or not. 
""" - # type: (Any) -> bool first_token = getattr(matched, 'first_token', None) last_token = getattr(matched, 'last_token', None) if not (first_token and last_token): @@ -662,8 +665,8 @@ class MatchInfo(object): """ match = attr.ib(type=_match.Match) # TODO: also add a top-level `replacement` variable, replacing the magic root. - bindings = attr.ib(factory=dict, type=Dict[str, _match.Match]) - replacements = attr.ib(factory=dict, type=Dict[str, _match.Match]) + bindings = attr.ib(factory=dict, type=Dict[str, BoundValue]) + replacements = attr.ib(factory=dict, type=Dict[str, formatting.Template]) def _stringify_candidate(context, candidate): @@ -861,7 +864,7 @@ class ImplicitEquals(Matcher): implicitly). """ - _value = attr.ib() # type: Any + _value = attr.ib(type=Any) def _match(self, context, candidate): if candidate == self._value: diff --git a/refex/python/matchers/base_matchers.py b/refex/python/matchers/base_matchers.py index 8831375..2deb7b3 100644 --- a/refex/python/matchers/base_matchers.py +++ b/refex/python/matchers/base_matchers.py @@ -502,7 +502,7 @@ class TypeIs(matcher.Matcher): (This does *not* check any type information for the code that this candidate AST node might represent.) """ - _type = attr.ib() # type: type + _type = attr.ib(type=type) def _match(self, context, candidate): if type(candidate) == self._type: @@ -540,9 +540,9 @@ class MatchesRegex(matcher.Matcher): The bound matches are neither lexical nor syntactic, but purely on codepoint spans. 
""" - _regex = attr.ib() # type: str + _regex = attr.ib(type=str) _subpattern = matcher.submatcher_attrib( - default=Anything()) # type: matcher.Matcher + default=Anything(), type=matcher.Matcher) @cached_property.cached_property def _wrapped_regex(self): @@ -593,7 +593,7 @@ def bind_variables(self): @attr.s(frozen=True) class FileMatchesRegex(matcher.Matcher): """Matches iff ``regex`` matches anywhere in the candidate's file.""" - _regex = attr.ib() # type: str + _regex = attr.ib(type=str) @cached_property.cached_property def _compiled(self): @@ -659,7 +659,7 @@ class ItemsAre(matcher.Matcher): """ # store the init parameters for a pretty repr and .bind_variables - _matchers = matcher.submatcher_list_attrib() # type: List[matcher.Matcher] + _matchers = matcher.submatcher_list_attrib(type=List[matcher.Matcher]) @matcher.accumulating_matcher def _match(self, context, candidate): @@ -710,7 +710,7 @@ class InLines(matcher.Matcher): # Lines should normally be either a set or for contiguous sequences, a `range` # object produced by calling `range(x, y)` - lines = attr.ib() # type: Container[int] + lines = attr.ib(type=Container[int]) def _match(self, context, candidate): diff --git a/refex/python/matchers/test_base_matchers.py b/refex/python/matchers/test_base_matchers.py index 7b810fa..8f115ee 100644 --- a/refex/python/matchers/test_base_matchers.py +++ b/refex/python/matchers/test_base_matchers.py @@ -20,17 +20,18 @@ from __future__ import print_function import ast +from unittest import mock from absl.testing import absltest from absl.testing import parameterized -from unittest import mock +from six.moves import range + from refex import match from refex.python import evaluate from refex.python import matcher from refex.python import matcher_test_util from refex.python.matchers import ast_matchers from refex.python.matchers import base_matchers -from six.moves import range _NOTHING = base_matchers.Unless(base_matchers.Anything()) _FAKE_CONTEXT = 
matcher.MatchContext(matcher.parse_ast('', 'foo.py')) diff --git a/refex/python/matchers/test_syntax_matchers.py b/refex/python/matchers/test_syntax_matchers.py index 859134f..45779cc 100644 --- a/refex/python/matchers/test_syntax_matchers.py +++ b/refex/python/matchers/test_syntax_matchers.py @@ -19,9 +19,9 @@ from __future__ import division from __future__ import print_function -from unittest import mock import textwrap import unittest +from unittest import mock from absl.testing import absltest from absl.testing import parameterized diff --git a/refex/python/syntactic_template.py b/refex/python/syntactic_template.py index 58898a1..7a2e190 100644 --- a/refex/python/syntactic_template.py +++ b/refex/python/syntactic_template.py @@ -133,8 +133,8 @@ class _BasePythonTemplate(formatting.Template): template: The source template """ template = attr.ib(type=Text) - _lexical_template = attr.ib(repr=False, init=False) # type: _LexicalTemplate - _ast_matcher = attr.ib(repr=False, init=False) # type: matcher.Matcher + _lexical_template = attr.ib(repr=False, init=False, type=_LexicalTemplate) + _ast_matcher = attr.ib(repr=False, init=False, type=matcher.Matcher) def __attrs_post_init__(self): if not isinstance(self.template, six.text_type): diff --git a/refex/refex_doctest.py b/refex/refex_doctest.py index 51f32f5..c813727 100644 --- a/refex/refex_doctest.py +++ b/refex/refex_doctest.py @@ -19,6 +19,11 @@ from __future__ import division from __future__ import print_function +# isort: split +# We put doctest after absltest so that it picks up the unittest monkeypatch. +# Otherwise doctest tests aren't runnable at all with Bazel. + +import doctest import sys from absl.testing import absltest @@ -26,12 +31,6 @@ import refex.python.matcher_test_util # so that it's found by _submodules: pylint: disable=unused-import import refex.search -# isort: split -# We put doctest after absltest so that it picks up the unittest monkeypatch. 
-# Otherwise doctest tests aren't runnable at all with Bazel. - -import doctest - def _submodules(package_module): """Gets submodules in a package. diff --git a/refex/rxerr_debug.py b/refex/rxerr_debug.py index 508b8a0..bc26713 100644 --- a/refex/rxerr_debug.py +++ b/refex/rxerr_debug.py @@ -46,18 +46,19 @@ def main(argv): source = failure['content'] except KeyError: pass - with tempfile.NamedTemporaryFile( - mode='w', encoding='utf-8', suffix='.py', delete=False) as out_f: - out_f.write(source) - print('Content:', out_f.name) + else: + with tempfile.NamedTemporaryFile( + mode='w', encoding='utf-8', suffix='.py', delete=False) as out_f: + out_f.write(source) + print('Content:', out_f.name) try: tb = failure['traceback'] except KeyError: pass else: - print( - pygments.highlight(tb, lexers.PythonTracebackLexer(), - formatters.Terminal256Formatter())) + lexer = lexers.PythonTracebackLexer() # pytype: disable=module-attr + formatter = formatters.Terminal256Formatter() # pytype: disable=module-attr + print(pygments.highlight(tb, lexer, formatter)) if __name__ == '__main__': diff --git a/refex/search.py b/refex/search.py index 1132ce0..c72ddc1 100644 --- a/refex/search.py +++ b/refex/search.py @@ -110,7 +110,7 @@ from refex import parsed_file from refex import substitution from refex.python import evaluate -from refex.python import matcher +from refex.python import matcher as _matcher from refex.python.matchers import base_matchers from refex.python.matchers import syntax_matchers @@ -225,8 +225,8 @@ def _fixed_point( for i in range(max_iterations): rewritten = formatting.apply_substitutions(text, new_substitutions) try: - parsed = matcher.parse_ast(rewritten, parsed.path) - except matcher.ParseError as e: + parsed = _matcher.parse_ast(rewritten, parsed.path) + except _matcher.ParseError as e: logging.error( 'Could not parse rewritten substitution in %s: %s\n' 'Tried to rewrite text[%s:%s] == %r\n' @@ -362,7 +362,7 @@ class WrappedSearcher(AbstractSearcher): Attributes: 
searcher: the wrapped searcher. """ - searcher = attr.ib() + searcher = attr.ib(type=AbstractSearcher) def parse(self, *args, **kwargs): return self.searcher.parse(*args, **kwargs) @@ -382,7 +382,7 @@ class PragmaSuppressedSearcher(WrappedSearcher): def find_iter_parsed( self, - parsed: matcher.PythonParsedFile) -> Iterable[substitution.Substitution]: + parsed: _matcher.PythonParsedFile) -> Iterable[substitution.Substitution]: return substitution.suppress_exclude_bytes( self.searcher.find_iter_parsed(parsed), _pragma_excluded_ranges(parsed), @@ -420,7 +420,7 @@ class CombinedSearcher(AbstractSearcher): # could walk once and only run the searchers that could possibly match # at a given point using an O(1) type lookup -- which would generally cut # down the number of results. - searchers = attr.ib(type=Tuple[AbstractSearcher, ...], converter=tuple,) + searchers = attr.ib(type=Sequence[AbstractSearcher], converter=tuple) def parse(self, data: Text, filename: str): """Parses using each sub-searcher, returning the most specific parsed file. @@ -488,7 +488,7 @@ def find_iter_parsed(self, parsed): def _pragma_excluded_ranges( - parsed: matcher.PythonParsedFile) -> Mapping[Text, Sequence[Span]]: + parsed: _matcher.PythonParsedFile) -> Mapping[Text, Sequence[Span]]: """Returns ranges for the parsed file that were disabled by "disable" pragmas. "enable" pragmas override "disable" pragmas within their scope and vice versa. @@ -513,7 +513,7 @@ def _pragma_excluded_ranges( return disabled -def _pragma_ranges(parsed: matcher.PythonParsedFile, +def _pragma_ranges(parsed: _matcher.PythonParsedFile, key: str) -> MutableMapping[Text, MutableSequence[Span]]: """Returns the pragma-annotated ranges for e.g. 
suppress_exclude_bytes.""" annotated_ranges = {} @@ -630,7 +630,7 @@ def find_dicts_parsed( def key_span_for_dict( self, parsed: parsed_file.ParsedFile, - match_dict: Iterable[Mapping[MatchKey, match.Match]], + match_dict: Mapping[MatchKey, match.Match], ) -> Optional[Tuple[int, int]]: """Returns the ``key_span`` that the final ``Substitution`` will have.""" return None @@ -638,7 +638,7 @@ def key_span_for_dict( def find_iter_parsed( self, - parsed: matcher.PythonParsedFile) -> Iterable[substitution.Substitution]: + parsed: _matcher.PythonParsedFile) -> Iterable[substitution.Substitution]: for match_dict, templates in self.find_dicts_parsed(parsed): try: replacements = formatting.rewrite_templates(parsed, match_dict, @@ -682,19 +682,17 @@ def __attrs_post_init__(self): missing_labels = formatting.template_variables( self.templates) - pattern_labels if missing_labels: + groups = ', '.join(f'`{g}`' for g in sorted(map(str, missing_labels))) raise ValueError( - 'The substitution template(s) referenced groups not available in the regex (`{self._compiled.pattern}`): {groups}' - .format( - self=self, - groups=', '.join( - '`{}`'.format(g) for g in sorted(map(str, missing_labels))))) + f'The substitution template(s) referenced groups not available in the regex (`{self._compiled.pattern}`): {groups}' + ) @classmethod def from_pattern(cls, pattern: str, templates: Optional[Dict[str, formatting.Template]]): return cls(compiled=default_compile_regex(pattern), templates=templates) def find_dicts_parsed( - self, parsed: matcher.PythonParsedFile + self, parsed: _matcher.PythonParsedFile ) -> Iterable[Tuple[Mapping[MatchKey, match.Match], Mapping[ MatchKey, formatting.Template]]]: for m in self._compiled.finditer(parsed.text): @@ -717,8 +715,8 @@ class BasePythonSearcher(AbstractSearcher): def parse(self, data: Text, filename: str): """Returns a :class:`refex.python.matcher.PythonParsedFile`.""" try: - return matcher.parse_ast(data, filename) - except matcher.ParseError as e: + 
return _matcher.parse_ast(data, filename) + except _matcher.ParseError as e: # Probably Python 2. TODO: figure out how to handle this. raise SkipFileError(str(e)) @@ -731,10 +729,10 @@ def approximate_regex(self): class BasePythonRewritingSearcher(BasePythonSearcher, BaseRewritingSearcher): """Searcher class using :mod``refex.python.matchers``.""" - _matcher = attr.ib() + matcher = attr.ib(type=_matcher.Matcher) @classmethod - def from_matcher(cls, matcher, templates: Optional[Dict[str, formatting.Template]]): + def from_matcher(cls, matcher, templates: Dict[str, formatting.Template]): """Creates a searcher from an evaluated matcher, and adds a root label.""" # We wrap the evaluated matcher in a SystemBind() that is sort of like # "group 0" for regexes. @@ -743,18 +741,18 @@ def from_matcher(cls, matcher, templates: Optional[Dict[str, formatting.Template base_matchers.SystemBind(ROOT_LABEL, matcher), templates)) def find_dicts_parsed( - self, parsed: matcher.PythonParsedFile + self, parsed: _matcher.PythonParsedFile ) -> Iterable[Tuple[Mapping[MatchKey, match.Match], Mapping[ MatchKey, formatting.Template]]]: - for result in matcher.find_iter(self._matcher, parsed): + for result in _matcher.find_iter(self.matcher, parsed): matches = { bound_name: match.value for bound_name, match in result.bindings.items() } yield matches, result.replacements - def key_span_for_dict(self, parsed: matcher.PythonParsedFile, - match_dict: Dict[str, match.Match]): + def key_span_for_dict(self, parsed: _matcher.PythonParsedFile, + match_dict: Dict[str, match.Match]): """Returns a grouping span for the containing simple AST node. 
Substitutions that lie within a simple statement or expression are @@ -775,7 +773,7 @@ def key_span_for_dict(self, parsed: matcher.PythonParsedFile, """ m = match_dict[ROOT_LABEL] - if not isinstance(m, matcher.LexicalASTMatch): + if not isinstance(m, _matcher.LexicalASTMatch): return None simple_node = parsed.nav.get_simple_node(m.matched) @@ -815,7 +813,7 @@ def from_pattern(cls, pattern: str, templates: Optional[Dict[str, formatting.Te def find_iter_parsed( self, - parsed: matcher.PythonParsedFile) -> Iterable[substitution.Substitution]: + parsed: _matcher.PythonParsedFile) -> Iterable[substitution.Substitution]: # All node IDs that have been removed. removed_nodes = set([]) # All node IDs that have been removed AND whose previous siblings have all @@ -855,7 +853,7 @@ def find_iter_parsed( def _sanitize_removed_stmt( self, - parsed: matcher.PythonParsedFile, + parsed: _matcher.PythonParsedFile, match_dict: Mapping[str, match.Match], sub: substitution.Substitution, removed_nodes: MutableSet[int], @@ -887,7 +885,7 @@ def _sanitize_removed_stmt( replacements = sub.replacements.copy() for metavar, replacement in replacements.items(): match_ = match_dict[metavar] - if not isinstance(match_, matcher.LexicalASTMatch): + if not isinstance(match_, _matcher.LexicalASTMatch): continue ast_match = match_.matched # TODO: Should a comment or another non-statement count? diff --git a/refex/substitution.py b/refex/substitution.py index 9f79fa4..2434674 100644 --- a/refex/substitution.py +++ b/refex/substitution.py @@ -150,7 +150,7 @@ def _validate(self): expected_type=six.text_type.__name__, actual_type=type(replacement).__name__)) - def relative_to_span(self, start: int, end: int) -> "Substitution": + def relative_to_span(self, start: int, end: int) -> Optional['Substitution']: """Returns a new substitution that is offset relative to the provided span. 
If ``sub`` is a :class:`Substitution` for ``s``, then diff --git a/refex/test_binary.py b/refex/test_binary.py index a790706..0c94d2f 100644 --- a/refex/test_binary.py +++ b/refex/test_binary.py @@ -16,7 +16,6 @@ """A simple test of the refex binary.""" import subprocess -import sys from absl.testing import absltest diff --git a/refex/test_cli.py b/refex/test_cli.py index 0cbf12a..8e2d3a3 100644 --- a/refex/test_cli.py +++ b/refex/test_cli.py @@ -20,12 +20,12 @@ import argparse import contextlib import json -from unittest import mock import os import re import sys import textwrap import unittest +from unittest import mock from absl.testing import absltest from absl.testing import parameterized diff --git a/refex/test_example_binary.py b/refex/test_example_binary.py index b46949a..0f08f5a 100644 --- a/refex/test_example_binary.py +++ b/refex/test_example_binary.py @@ -16,7 +16,6 @@ """A simple test of the example binary.""" import subprocess -import sys from absl.testing import absltest diff --git a/refex/test_future_string.py b/refex/test_future_string.py deleted file mode 100644 index b7c6034..0000000 --- a/refex/test_future_string.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Tests for refex.future_string.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function -from __future__ import unicode_literals - -import collections -import unittest - -from absl.testing import absltest -from absl.testing import parameterized -import attr -import six - -from refex import future_string - - -@attr.s -class Stringifiable(object): - """Test class for future_string.Template string conversion. - - bytes(stringifiable) == stringifiable.byte_string - six.text_type(stringifiable) == stringifiable.text_string - """ - text_string = attr.ib() - byte_string = attr.ib() - - def __bytes__(self): - return self.byte_string - - def __unicode__(self): - return self.text_string - - __str__ = __bytes__ if six.PY2 else __unicode__ - - -class StringifiableTest(object): - """Stringifiable is complex enough to need tests...""" - - def test_stringifiable_text(self): - self.assertEqual( - six.text_type(Stringifiable(text_string=u'text', byte_string=b'bytes')), - u'text') - - def test_stringifiable_bytes(self): - self.assertEqual( - bytes(Stringifiable(text_string=u'text', byte_string=b'bytes')), - b'bytes') - - -@parameterized.named_parameters( - ('substitute', lambda t, m: t.substitute(m)), - ('safe_substitute', lambda t, m: t.safe_substitute(m)), -) -class TemplateTest(parameterized.TestCase): - """Tests for future_string.Template.""" - - @unittest.skipUnless(six.PY2, 'bytes formatting differs in py2 and py3+') - def test_bytes_value_py2(self, substitute): - """Tests that bytes and str (both Text) are interchangeable in Python 2.""" - self.assertEqual( - substitute(future_string.Template(u'hello $var'), {u'var': b'world'}), - u'hello world') - - @unittest.skipIf(six.PY2, 'bytes formatting differs in py2 and py3+') - def test_bytes_value_py3(self, substitute): - self.assertEqual( - substitute(future_string.Template(u'hello $var'), {u'var': b'world'}), - u"hello b'world'") - - def test_text(self, substitute): 
- self.assertEqual( - substitute(future_string.Template(u'hello $var'), {u'var': u'world'}), - u'hello world') - - def test_text_unencodeable(self, substitute): - self.assertEqual( - substitute( - future_string.Template(u'hello $var'), {u'var': u'world\xff'}), - u'hello world\xff') - - def test_text_convertible(self, substitute): - self.assertEqual( - substitute( - future_string.Template(u'hello $var'), - {u'var': Stringifiable(text_string=u'world', byte_string=b'FAIL')}), - u'hello world') - - def test_text_convertible_unencodeable(self, substitute): - self.assertEqual( - substitute( - future_string.Template(u'hello $var'), { - u'var': - Stringifiable( - text_string=u'world\xff', byte_string=b'FAIL') - }), u'hello world\xff') - - def test_lazy_dict(self, substitute): - self.assertEqual( - substitute( - future_string.Template(u'$x $y'), collections.Counter([u'x'])), - u'1 0') - - -if __name__ == '__main__': - absltest.main() diff --git a/refex/test_rxerr_debug.py b/refex/test_rxerr_debug.py index 05dffdc..f86e865 100644 --- a/refex/test_rxerr_debug.py +++ b/refex/test_rxerr_debug.py @@ -29,6 +29,24 @@ def test_argv(self): # Instead, we can just run shlex.split() over it as a quick safety check. self.assertEqual(shlex.split(stdout.getvalue()), ['Command:'] + argv) + def test_traceback(self): + """Tests that the traceback shows up, ish.""" + tb = ('Traceback (most recent call last):\n' + ' File "<string>", line 1, in <module>\n' + 'SomeError: description\n') + path = self.create_tempfile( + content=json.dumps({'failures': { + 'path': { + 'traceback': tb + } + }})).full_path + stdout = io.StringIO() + with contextlib.redirect_stdout(stdout): + rxerr_debug.main(['rxerr_debug', path]) + stdout = stdout.getvalue() + self.assertIn('SomeError', stdout) + self.assertIn('description', stdout) + if __name__ == '__main__': absltest.main()