diff --git a/.github/workflows/installation.yaml b/.github/workflows/installation.yaml index f64ea552f..f3f2b4c3a 100644 --- a/.github/workflows/installation.yaml +++ b/.github/workflows/installation.yaml @@ -33,7 +33,7 @@ jobs: fail-fast: false matrix: os: ['ubuntu-latest', 'macos-latest', 'windows-latest'] - python-version: ['3.8', '3.9', '3.10', '3.11'] + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] method: ['install' ,'pip'] include: diff --git a/.github/workflows/tests.yaml b/.github/workflows/tests.yaml index c3218bbc4..33a04c453 100644 --- a/.github/workflows/tests.yaml +++ b/.github/workflows/tests.yaml @@ -38,7 +38,7 @@ jobs: strategy: matrix: os: ['macos-latest', 'ubuntu-latest', 'windows-latest'] - python-version: ['3.8', '3.9', '3.10', '3.11'] + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] include: - os: macos-latest shells: 'sh,csh,bash,tcsh,zsh,pwsh' diff --git a/setup.py b/setup.py index ad11b1edb..aa1968e9a 100644 --- a/setup.py +++ b/setup.py @@ -68,6 +68,7 @@ def find_files(pattern, path=None, root="rez"): packages=find_packages('src', exclude=["build_utils", "build_utils.*", "tests"]), + install_requires=['setuptools'], package_data={ 'rez': ['utils/logging.conf'] + diff --git a/src/rez/cli/selftest.py b/src/rez/cli/selftest.py index ec4d0e39d..113ca35bc 100644 --- a/src/rez/cli/selftest.py +++ b/src/rez/cli/selftest.py @@ -56,7 +56,7 @@ def __call__(self, parser, namespace, values, option_string=None): prefix = "test_" for importer, name, ispkg in iter_modules([tests_dir]): if not ispkg and name.startswith(prefix): - module = importer.find_module(name).load_module(name) + module = importer.find_spec(name).loader.load_module(name) name_ = name[len(prefix):] all_module_tests.append(name_) tests.append((name_, module)) diff --git a/src/rez/plugin_managers.py b/src/rez/plugin_managers.py index 64b1d8f09..0c3e6b178 100644 --- a/src/rez/plugin_managers.py +++ b/src/rez/plugin_managers.py @@ -159,8 +159,8 @@ def load_plugins(self): # already loaded, so check for that plugin_module = sys.modules.get(modname) if plugin_module is None: - loader = importer.find_module(modname) - plugin_module = loader.load_module(modname) + loader = importer.find_spec(modname) + plugin_module = loader.loader.load_module(modname) elif os.path.dirname(plugin_module.__file__) != path: if config.debug("plugins"): diff --git a/src/rez/vendor/README.md b/src/rez/vendor/README.md index b5918539f..42689f62a 100644 --- a/src/rez/vendor/README.md +++ b/src/rez/vendor/README.md @@ -39,57 +39,51 @@ Our version is patched. atomicwrites -1.2.1 (Aug 30, 2018) +1.4.1 (Jul 8, 2022) MIT -https://github.com/untitaker/python-atomicwrites - - - - -attrs - -19.1.0 (Mar 3, 2019) - -MIT - -https://github.com/python-attrs/attrs
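A note on the find_module() migration above: Python 3.12 removed the long-deprecated find_module() finder API, which is why the selftest.py and plugin_managers.py hunks switch to find_spec(). Both hunks still go through the deprecated Loader.load_module(), which keeps the patch minimal but emits a DeprecationWarning on current interpreters, and the plugin_managers.py hunk leaves a variable named `loader` holding what is now a ModuleSpec (`spec` would be the clearer name). The fully spec-based pattern from the importlib docs would look roughly like the sketch below; `load_module_from_path` is a hypothetical helper for illustration, not code from this patch:

    import sys
    from importlib.util import module_from_spec
    from pkgutil import iter_modules

    def load_module_from_path(path, modname):
        """Hypothetical helper: import `modname` from a single search path."""
        for finder, name, ispkg in iter_modules([path]):
            if name != modname:
                continue
            spec = finder.find_spec(name)      # replaces finder.find_module(name)
            module = module_from_spec(spec)    # create the module object from its spec
            sys.modules[name] = module         # register before executing, per the import system docs
            spec.loader.exec_module(module)    # replaces loader.load_module(name)
            return module
        return None

Relatedly, the setup.py hunk declares setuptools as an explicit dependency, presumably because Python 3.12 also dropped distutils (PEP 632) and newer virtual environments no longer bundle setuptools by default, so pkg_resources is not guaranteed to be importable anymore.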
-Added (July 2019) to enable the use of packaging lib that depends on it. +https://github.com/untitaker/python-atomicwrites
+No changes.
+Updated (April 2025) to help address py3.12 update. colorama -0.4.1 (Nov 25, 2018) +0.4.6 (Oct 24, 2022) BSD 3-Clause https://github.com/tartley/colorama
+No changes.
+Updated (April 2025) to help address py3.12 update. distlib -0.2.9.post0 (May 14, 2019) +0.3.9 (Oct 29, 2024) PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 https://bitbucket.org/pypa/distlib/src/master/
-Updated (June 2019) to enable wheel distribution based installations. +Updated (April 2025) to help address py3.12 update. distro -1.5.0 (Mar 31, 2020) +1.9.0 (Dec 24, 2023) Apache 2.0 -https://github.com/python-distro/distro +https://github.com/python-distro/distro
+No changes.
+Updated (April 2025) to help address py3.12 update.

@@ -161,12 +155,13 @@ https://github.com/pika/pika

progress

-1.5 (Mar 6, 2019)
+1.6 (Jul 28, 2021)

ISC

https://github.com/verigak/progress
-Upgraded from 1.2 to 1.5 as of Oct 16 2019 +No changes.
+Updated (April 2025) to help address py3.12 update. @@ -227,37 +222,26 @@ Our version is patched. six -1.12.0 (Dec 9, 2018) +1.17.0 (Dec 4, 2024) MIT https://github.com/benjaminp/six
-Updated (July 2019) to coincide with packaging lib addition that depends on. -Also now required to support py2/3 interoperability. - - - - -yaml lib (PyYAML) - -5.1 (May 30, 2011) - -MIT - -https://github.com/yaml/pyyaml
-No changes but must maintain separate version between py2 and py3 for time being. +Updated (April 2025) to help address py3.12 update.
+No longer needed in rez itself, but still used by other vendored modules.


-yaml lib3 (PyYAML)
+yaml (PyYAML)

-5.1 (May 30, 2011)
+6.0.1 (Jul 17, 2023)

MIT

https://github.com/yaml/pyyaml
-No changes but must maintain separate version between py2 and py3 for time being.
+No changes. Pinned to 6.0.1 while py3.7 is still supported.
+Updated (April 2025) to help address py3.12 update. diff --git a/src/rez/vendor/atomicwrites/__init__.py b/src/rez/vendor/atomicwrites/__init__.py index 4f43d762a..669191bb5 100644 --- a/src/rez/vendor/atomicwrites/__init__.py +++ b/src/rez/vendor/atomicwrites/__init__.py @@ -9,7 +9,13 @@ except ImportError: fcntl = None -__version__ = '1.2.1' +# `fspath` was added in Python 3.6 +try: + from os import fspath +except ImportError: + fspath = None + +__version__ = '1.4.1' PY2 = sys.version_info[0] == 2 @@ -86,6 +92,7 @@ def replace_atomic(src, dst): ''' Move ``src`` to ``dst``. If ``dst`` exists, it will be silently overwritten. + Both paths must reside on the same filesystem for the operation to be atomic. ''' @@ -97,6 +104,7 @@ def move_atomic(src, dst): Move ``src`` to ``dst``. There might a timewindow where both filesystem entries exist. If ``dst`` already exists, :py:exc:`FileExistsError` will be raised. + Both paths must reside on the same filesystem for the operation to be atomic. ''' @@ -106,14 +114,20 @@ def move_atomic(src, dst): class AtomicWriter(object): ''' A helper class for performing atomic writes. Usage:: + with AtomicWriter(path).open() as f: f.write(...) + :param path: The destination filepath. May or may not exist. :param mode: The filemode for the temporary file. This defaults to `wb` in Python 2 and `w` in Python 3. :param overwrite: If set to false, an error is raised if ``path`` exists. Errors are only raised after the file has been written to. Either way, the operation is atomic. + :param open_kwargs: Keyword-arguments to pass to the underlying + :py:func:`open` call. This can be used to set the encoding when opening + files in text-mode. + If you need further control over the exact behavior, you are encouraged to subclass. ''' @@ -132,6 +146,10 @@ def __init__(self, path, mode=DEFAULT_MODE, overwrite=False, if 'w' not in mode: raise ValueError('AtomicWriters can only be written to.') + # Attempt to convert `path` to `str` or `bytes` + if fspath is not None: + path = fspath(path) + self._path = path self._mode = mode self._overwrite = overwrite @@ -160,11 +178,13 @@ def _open(self, get_fileobject): except Exception: pass - def get_fileobject(self, dir=None, **kwargs): + def get_fileobject(self, suffix="", prefix=tempfile.gettempprefix(), + dir=None, **kwargs): '''Return the temporary file to use.''' if dir is None: dir = os.path.normpath(os.path.dirname(self._path)) - descriptor, name = tempfile.mkstemp(dir=dir) + descriptor, name = tempfile.mkstemp(suffix=suffix, prefix=prefix, + dir=dir) # io.open() will take either the descriptor or the name, but we need # the name later for commit()/replace_atomic() and couldn't find a way # to get the filename from the descriptor. @@ -194,12 +214,15 @@ def rollback(self, f): def atomic_write(path, writer_cls=AtomicWriter, **cls_kwargs): ''' Simple atomic writes. This wraps :py:class:`AtomicWriter`:: + with atomic_write(path) as f: f.write(...) + :param path: The target path to write to. :param writer_cls: The writer class to use. This parameter is useful if you subclassed :py:class:`AtomicWriter` to change some behavior and want to use that new subclass. + Additional keyword arguments are passed to the writer class. See :py:class:`AtomicWriter`. 
''' diff --git a/src/rez/vendor/attr/LICENSE b/src/rez/vendor/attr/LICENSE deleted file mode 100644 index a1374eabb..000000000 --- a/src/rez/vendor/attr/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Hynek Schlawack - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/src/rez/vendor/attr/__init__.py b/src/rez/vendor/attr/__init__.py deleted file mode 100644 index 0ebe5197a..000000000 --- a/src/rez/vendor/attr/__init__.py +++ /dev/null @@ -1,65 +0,0 @@ -from __future__ import absolute_import, division, print_function - -from functools import partial - -from . import converters, exceptions, filters, validators -from ._config import get_run_validators, set_run_validators -from ._funcs import asdict, assoc, astuple, evolve, has -from ._make import ( - NOTHING, - Attribute, - Factory, - attrib, - attrs, - fields, - fields_dict, - make_class, - validate, -) - - -__version__ = "19.1.0" - -__title__ = "attrs" -__description__ = "Classes Without Boilerplate" -__url__ = "https://www.attrs.org/" -__uri__ = __url__ -__doc__ = __description__ + " <" + __uri__ + ">" - -__author__ = "Hynek Schlawack" -__email__ = "hs@ox.cx" - -__license__ = "MIT" -__copyright__ = "Copyright (c) 2015 Hynek Schlawack" - - -s = attributes = attrs -ib = attr = attrib -dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) - -__all__ = [ - "Attribute", - "Factory", - "NOTHING", - "asdict", - "assoc", - "astuple", - "attr", - "attrib", - "attributes", - "attrs", - "converters", - "evolve", - "exceptions", - "fields", - "fields_dict", - "filters", - "get_run_validators", - "has", - "ib", - "make_class", - "s", - "set_run_validators", - "validate", - "validators", -] diff --git a/src/rez/vendor/attr/__init__.pyi b/src/rez/vendor/attr/__init__.pyi deleted file mode 100644 index fcb93b18e..000000000 --- a/src/rez/vendor/attr/__init__.pyi +++ /dev/null @@ -1,255 +0,0 @@ -from typing import ( - Any, - Callable, - Dict, - Generic, - List, - Optional, - Sequence, - Mapping, - Tuple, - Type, - TypeVar, - Union, - overload, -) - -# `import X as X` is required to make these public -from . import exceptions as exceptions -from . import filters as filters -from . import converters as converters -from . 
import validators as validators - -_T = TypeVar("_T") -_C = TypeVar("_C", bound=type) - -_ValidatorType = Callable[[Any, Attribute[_T], _T], Any] -_ConverterType = Callable[[Any], _T] -_FilterType = Callable[[Attribute[_T], _T], bool] -# FIXME: in reality, if multiple validators are passed they must be in a list or tuple, -# but those are invariant and so would prevent subtypes of _ValidatorType from working -# when passed in a list or tuple. -_ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]] - -# _make -- - -NOTHING: object - -# NOTE: Factory lies about its return type to make this possible: `x: List[int] = Factory(list)` -# Work around mypy issue #4554 in the common case by using an overload. -@overload -def Factory(factory: Callable[[], _T]) -> _T: ... -@overload -def Factory( - factory: Union[Callable[[Any], _T], Callable[[], _T]], - takes_self: bool = ..., -) -> _T: ... - -class Attribute(Generic[_T]): - name: str - default: Optional[_T] - validator: Optional[_ValidatorType[_T]] - repr: bool - cmp: bool - hash: Optional[bool] - init: bool - converter: Optional[_ConverterType[_T]] - metadata: Dict[Any, Any] - type: Optional[Type[_T]] - kw_only: bool - def __lt__(self, x: Attribute[_T]) -> bool: ... - def __le__(self, x: Attribute[_T]) -> bool: ... - def __gt__(self, x: Attribute[_T]) -> bool: ... - def __ge__(self, x: Attribute[_T]) -> bool: ... - -# NOTE: We had several choices for the annotation to use for type arg: -# 1) Type[_T] -# - Pros: Handles simple cases correctly -# - Cons: Might produce less informative errors in the case of conflicting TypeVars -# e.g. `attr.ib(default='bad', type=int)` -# 2) Callable[..., _T] -# - Pros: Better error messages than #1 for conflicting TypeVars -# - Cons: Terrible error messages for validator checks. -# e.g. attr.ib(type=int, validator=validate_str) -# -> error: Cannot infer function type argument -# 3) type (and do all of the work in the mypy plugin) -# - Pros: Simple here, and we could customize the plugin with our own errors. -# - Cons: Would need to write mypy plugin code to handle all the cases. -# We chose option #1. - -# `attr` lies about its return type to make the following possible: -# attr() -> Any -# attr(8) -> int -# attr(validator=) -> Whatever the callable expects. -# This makes this type of assignments possible: -# x: int = attr(8) -# -# This form catches explicit None or no default but with no other arguments returns Any. -@overload -def attrib( - default: None = ..., - validator: None = ..., - repr: bool = ..., - cmp: bool = ..., - hash: Optional[bool] = ..., - init: bool = ..., - convert: None = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - type: None = ..., - converter: None = ..., - factory: None = ..., - kw_only: bool = ..., -) -> Any: ... - -# This form catches an explicit None or no default and infers the type from the other arguments. -@overload -def attrib( - default: None = ..., - validator: Optional[_ValidatorArgType[_T]] = ..., - repr: bool = ..., - cmp: bool = ..., - hash: Optional[bool] = ..., - init: bool = ..., - convert: Optional[_ConverterType[_T]] = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - type: Optional[Type[_T]] = ..., - converter: Optional[_ConverterType[_T]] = ..., - factory: Optional[Callable[[], _T]] = ..., - kw_only: bool = ..., -) -> _T: ... - -# This form catches an explicit default argument. 
-@overload -def attrib( - default: _T, - validator: Optional[_ValidatorArgType[_T]] = ..., - repr: bool = ..., - cmp: bool = ..., - hash: Optional[bool] = ..., - init: bool = ..., - convert: Optional[_ConverterType[_T]] = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - type: Optional[Type[_T]] = ..., - converter: Optional[_ConverterType[_T]] = ..., - factory: Optional[Callable[[], _T]] = ..., - kw_only: bool = ..., -) -> _T: ... - -# This form covers type=non-Type: e.g. forward references (str), Any -@overload -def attrib( - default: Optional[_T] = ..., - validator: Optional[_ValidatorArgType[_T]] = ..., - repr: bool = ..., - cmp: bool = ..., - hash: Optional[bool] = ..., - init: bool = ..., - convert: Optional[_ConverterType[_T]] = ..., - metadata: Optional[Mapping[Any, Any]] = ..., - type: object = ..., - converter: Optional[_ConverterType[_T]] = ..., - factory: Optional[Callable[[], _T]] = ..., - kw_only: bool = ..., -) -> Any: ... -@overload -def attrs( - maybe_cls: _C, - these: Optional[Dict[str, Any]] = ..., - repr_ns: Optional[str] = ..., - repr: bool = ..., - cmp: bool = ..., - hash: Optional[bool] = ..., - init: bool = ..., - slots: bool = ..., - frozen: bool = ..., - weakref_slot: bool = ..., - str: bool = ..., - auto_attribs: bool = ..., - kw_only: bool = ..., - cache_hash: bool = ..., - auto_exc: bool = ..., -) -> _C: ... -@overload -def attrs( - maybe_cls: None = ..., - these: Optional[Dict[str, Any]] = ..., - repr_ns: Optional[str] = ..., - repr: bool = ..., - cmp: bool = ..., - hash: Optional[bool] = ..., - init: bool = ..., - slots: bool = ..., - frozen: bool = ..., - weakref_slot: bool = ..., - str: bool = ..., - auto_attribs: bool = ..., - kw_only: bool = ..., - cache_hash: bool = ..., - auto_exc: bool = ..., -) -> Callable[[_C], _C]: ... - -# TODO: add support for returning NamedTuple from the mypy plugin -class _Fields(Tuple[Attribute[Any], ...]): - def __getattr__(self, name: str) -> Attribute[Any]: ... - -def fields(cls: type) -> _Fields: ... -def fields_dict(cls: type) -> Dict[str, Attribute[Any]]: ... -def validate(inst: Any) -> None: ... - -# TODO: add support for returning a proper attrs class from the mypy plugin -# we use Any instead of _CountingAttr so that e.g. `make_class('Foo', [attr.ib()])` is valid -def make_class( - name: str, - attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]], - bases: Tuple[type, ...] = ..., - repr_ns: Optional[str] = ..., - repr: bool = ..., - cmp: bool = ..., - hash: Optional[bool] = ..., - init: bool = ..., - slots: bool = ..., - frozen: bool = ..., - weakref_slot: bool = ..., - str: bool = ..., - auto_attribs: bool = ..., - kw_only: bool = ..., - cache_hash: bool = ..., - auto_exc: bool = ..., -) -> type: ... - -# _funcs -- - -# TODO: add support for returning TypedDict from the mypy plugin -# FIXME: asdict/astuple do not honor their factory args. waiting on one of these: -# https://github.com/python/mypy/issues/4236 -# https://github.com/python/typing/issues/253 -def asdict( - inst: Any, - recurse: bool = ..., - filter: Optional[_FilterType[Any]] = ..., - dict_factory: Type[Mapping[Any, Any]] = ..., - retain_collection_types: bool = ..., -) -> Dict[str, Any]: ... - -# TODO: add support for returning NamedTuple from the mypy plugin -def astuple( - inst: Any, - recurse: bool = ..., - filter: Optional[_FilterType[Any]] = ..., - tuple_factory: Type[Sequence[Any]] = ..., - retain_collection_types: bool = ..., -) -> Tuple[Any, ...]: ... -def has(cls: type) -> bool: ... -def assoc(inst: _T, **changes: Any) -> _T: ... 
-def evolve(inst: _T, **changes: Any) -> _T: ... - -# _config -- - -def set_run_validators(run: bool) -> None: ... -def get_run_validators() -> bool: ... - -# aliases -- - -s = attributes = attrs -ib = attr = attrib -dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;) diff --git a/src/rez/vendor/attr/_compat.py b/src/rez/vendor/attr/_compat.py deleted file mode 100644 index 9a99dcd96..000000000 --- a/src/rez/vendor/attr/_compat.py +++ /dev/null @@ -1,159 +0,0 @@ -from __future__ import absolute_import, division, print_function - -import platform -import sys -import types -import warnings - - -PY2 = sys.version_info[0] == 2 -PYPY = platform.python_implementation() == "PyPy" - - -if PYPY or sys.version_info[:2] >= (3, 6): - ordered_dict = dict -else: - from collections import OrderedDict - - ordered_dict = OrderedDict - - -if PY2: - from UserDict import IterableUserDict - from collections import Mapping, Sequence # noqa - - # We 'bundle' isclass instead of using inspect as importing inspect is - # fairly expensive (order of 10-15 ms for a modern machine in 2016) - def isclass(klass): - return isinstance(klass, (type, types.ClassType)) - - # TYPE is used in exceptions, repr(int) is different on Python 2 and 3. - TYPE = "type" - - def iteritems(d): - return d.iteritems() - - # Python 2 is bereft of a read-only dict proxy, so we make one! - class ReadOnlyDict(IterableUserDict): - """ - Best-effort read-only dict wrapper. - """ - - def __setitem__(self, key, val): - # We gently pretend we're a Python 3 mappingproxy. - raise TypeError( - "'mappingproxy' object does not support item assignment" - ) - - def update(self, _): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'update'" - ) - - def __delitem__(self, _): - # We gently pretend we're a Python 3 mappingproxy. - raise TypeError( - "'mappingproxy' object does not support item deletion" - ) - - def clear(self): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'clear'" - ) - - def pop(self, key, default=None): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'pop'" - ) - - def popitem(self): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'popitem'" - ) - - def setdefault(self, key, default=None): - # We gently pretend we're a Python 3 mappingproxy. - raise AttributeError( - "'mappingproxy' object has no attribute 'setdefault'" - ) - - def __repr__(self): - # Override to be identical to the Python 3 version. - return "mappingproxy(" + repr(self.data) + ")" - - def metadata_proxy(d): - res = ReadOnlyDict() - res.data.update(d) # We blocked update, so we have to do it like this. - return res - - def just_warn(*args, **kw): # pragma: nocover - """ - We only warn on Python 3 because we are not aware of any concrete - consequences of not setting the cell on Python 2. - """ - - -else: # Python 3 and later. - from collections.abc import Mapping, Sequence # noqa - - def just_warn(*args, **kw): - """ - We only warn on Python 3 because we are not aware of any concrete - consequences of not setting the cell on Python 2. - """ - warnings.warn( - "Missing ctypes. 
Some features like bare super() or accessing " - "__class__ will not work with slotted classes.", - RuntimeWarning, - stacklevel=2, - ) - - def isclass(klass): - return isinstance(klass, type) - - TYPE = "class" - - def iteritems(d): - return d.items() - - def metadata_proxy(d): - return types.MappingProxyType(dict(d)) - - -def import_ctypes(): - """ - Moved into a function for testability. - """ - import ctypes - - return ctypes - - -def make_set_closure_cell(): - """ - Moved into a function for testability. - """ - if PYPY: # pragma: no cover - - def set_closure_cell(cell, value): - cell.__setstate__((value,)) - - else: - try: - ctypes = import_ctypes() - - set_closure_cell = ctypes.pythonapi.PyCell_Set - set_closure_cell.argtypes = (ctypes.py_object, ctypes.py_object) - set_closure_cell.restype = ctypes.c_int - except Exception: - # We try best effort to set the cell, but sometimes it's not - # possible. For example on Jython or on GAE. - set_closure_cell = just_warn - return set_closure_cell - - -set_closure_cell = make_set_closure_cell() diff --git a/src/rez/vendor/attr/_config.py b/src/rez/vendor/attr/_config.py deleted file mode 100644 index 8ec920962..000000000 --- a/src/rez/vendor/attr/_config.py +++ /dev/null @@ -1,23 +0,0 @@ -from __future__ import absolute_import, division, print_function - - -__all__ = ["set_run_validators", "get_run_validators"] - -_run_validators = True - - -def set_run_validators(run): - """ - Set whether or not validators are run. By default, they are run. - """ - if not isinstance(run, bool): - raise TypeError("'run' must be bool.") - global _run_validators - _run_validators = run - - -def get_run_validators(): - """ - Return whether or not validators are run. - """ - return _run_validators diff --git a/src/rez/vendor/attr/_funcs.py b/src/rez/vendor/attr/_funcs.py deleted file mode 100644 index b61d23941..000000000 --- a/src/rez/vendor/attr/_funcs.py +++ /dev/null @@ -1,290 +0,0 @@ -from __future__ import absolute_import, division, print_function - -import copy - -from ._compat import iteritems -from ._make import NOTHING, _obj_setattr, fields -from .exceptions import AttrsAttributeNotFoundError - - -def asdict( - inst, - recurse=True, - filter=None, - dict_factory=dict, - retain_collection_types=False, -): - """ - Return the ``attrs`` attribute values of *inst* as a dict. - - Optionally recurse into other ``attrs``-decorated classes. - - :param inst: Instance of an ``attrs``-decorated class. - :param bool recurse: Recurse into classes that are also - ``attrs``-decorated. - :param callable filter: A callable whose return code determines whether an - attribute or element is included (``True``) or dropped (``False``). Is - called with the :class:`attr.Attribute` as the first argument and the - value as the second argument. - :param callable dict_factory: A callable to produce dictionaries from. For - example, to produce ordered dictionaries instead of normal Python - dictionaries, pass in ``collections.OrderedDict``. - :param bool retain_collection_types: Do not convert to ``list`` when - encountering an attribute whose type is ``tuple`` or ``set``. Only - meaningful if ``recurse`` is ``True``. - - :rtype: return type of *dict_factory* - - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - .. versionadded:: 16.0.0 *dict_factory* - .. 
versionadded:: 16.1.0 *retain_collection_types* - """ - attrs = fields(inst.__class__) - rv = dict_factory() - for a in attrs: - v = getattr(inst, a.name) - if filter is not None and not filter(a, v): - continue - if recurse is True: - if has(v.__class__): - rv[a.name] = asdict( - v, True, filter, dict_factory, retain_collection_types - ) - elif isinstance(v, (tuple, list, set)): - cf = v.__class__ if retain_collection_types is True else list - rv[a.name] = cf( - [ - _asdict_anything( - i, filter, dict_factory, retain_collection_types - ) - for i in v - ] - ) - elif isinstance(v, dict): - df = dict_factory - rv[a.name] = df( - ( - _asdict_anything( - kk, filter, df, retain_collection_types - ), - _asdict_anything( - vv, filter, df, retain_collection_types - ), - ) - for kk, vv in iteritems(v) - ) - else: - rv[a.name] = v - else: - rv[a.name] = v - return rv - - -def _asdict_anything(val, filter, dict_factory, retain_collection_types): - """ - ``asdict`` only works on attrs instances, this works on anything. - """ - if getattr(val.__class__, "__attrs_attrs__", None) is not None: - # Attrs class. - rv = asdict(val, True, filter, dict_factory, retain_collection_types) - elif isinstance(val, (tuple, list, set)): - cf = val.__class__ if retain_collection_types is True else list - rv = cf( - [ - _asdict_anything( - i, filter, dict_factory, retain_collection_types - ) - for i in val - ] - ) - elif isinstance(val, dict): - df = dict_factory - rv = df( - ( - _asdict_anything(kk, filter, df, retain_collection_types), - _asdict_anything(vv, filter, df, retain_collection_types), - ) - for kk, vv in iteritems(val) - ) - else: - rv = val - return rv - - -def astuple( - inst, - recurse=True, - filter=None, - tuple_factory=tuple, - retain_collection_types=False, -): - """ - Return the ``attrs`` attribute values of *inst* as a tuple. - - Optionally recurse into other ``attrs``-decorated classes. - - :param inst: Instance of an ``attrs``-decorated class. - :param bool recurse: Recurse into classes that are also - ``attrs``-decorated. - :param callable filter: A callable whose return code determines whether an - attribute or element is included (``True``) or dropped (``False``). Is - called with the :class:`attr.Attribute` as the first argument and the - value as the second argument. - :param callable tuple_factory: A callable to produce tuples from. For - example, to produce lists instead of tuples. - :param bool retain_collection_types: Do not convert to ``list`` - or ``dict`` when encountering an attribute which type is - ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is - ``True``. - - :rtype: return type of *tuple_factory* - - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - .. versionadded:: 16.2.0 - """ - attrs = fields(inst.__class__) - rv = [] - retain = retain_collection_types # Very long. 
:/ - for a in attrs: - v = getattr(inst, a.name) - if filter is not None and not filter(a, v): - continue - if recurse is True: - if has(v.__class__): - rv.append( - astuple( - v, - recurse=True, - filter=filter, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - ) - elif isinstance(v, (tuple, list, set)): - cf = v.__class__ if retain is True else list - rv.append( - cf( - [ - astuple( - j, - recurse=True, - filter=filter, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - if has(j.__class__) - else j - for j in v - ] - ) - ) - elif isinstance(v, dict): - df = v.__class__ if retain is True else dict - rv.append( - df( - ( - astuple( - kk, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - if has(kk.__class__) - else kk, - astuple( - vv, - tuple_factory=tuple_factory, - retain_collection_types=retain, - ) - if has(vv.__class__) - else vv, - ) - for kk, vv in iteritems(v) - ) - ) - else: - rv.append(v) - else: - rv.append(v) - return rv if tuple_factory is list else tuple_factory(rv) - - -def has(cls): - """ - Check whether *cls* is a class with ``attrs`` attributes. - - :param type cls: Class to introspect. - :raise TypeError: If *cls* is not a class. - - :rtype: :class:`bool` - """ - return getattr(cls, "__attrs_attrs__", None) is not None - - -def assoc(inst, **changes): - """ - Copy *inst* and apply *changes*. - - :param inst: Instance of a class with ``attrs`` attributes. - :param changes: Keyword changes in the new copy. - - :return: A copy of inst with *changes* incorporated. - - :raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't - be found on *cls*. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - .. deprecated:: 17.1.0 - Use :func:`evolve` instead. - """ - import warnings - - warnings.warn( - "assoc is deprecated and will be removed after 2018/01.", - DeprecationWarning, - stacklevel=2, - ) - new = copy.copy(inst) - attrs = fields(inst.__class__) - for k, v in iteritems(changes): - a = getattr(attrs, k, NOTHING) - if a is NOTHING: - raise AttrsAttributeNotFoundError( - "{k} is not an attrs attribute on {cl}.".format( - k=k, cl=new.__class__ - ) - ) - _obj_setattr(new, k, v) - return new - - -def evolve(inst, **changes): - """ - Create a new instance, based on *inst* with *changes* applied. - - :param inst: Instance of a class with ``attrs`` attributes. - :param changes: Keyword changes in the new copy. - - :return: A copy of inst with *changes* incorporated. - - :raise TypeError: If *attr_name* couldn't be found in the class - ``__init__``. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - .. versionadded:: 17.1.0 - """ - cls = inst.__class__ - attrs = fields(cls) - for a in attrs: - if not a.init: - continue - attr_name = a.name # To deal with private attributes. - init_name = attr_name if attr_name[0] != "_" else attr_name[1:] - if init_name not in changes: - changes[init_name] = getattr(inst, attr_name) - return cls(**changes) diff --git a/src/rez/vendor/attr/_make.py b/src/rez/vendor/attr/_make.py deleted file mode 100644 index 827175a46..000000000 --- a/src/rez/vendor/attr/_make.py +++ /dev/null @@ -1,2086 +0,0 @@ -from __future__ import absolute_import, division, print_function - -import copy -import hashlib -import linecache -import sys -import threading -import warnings - -from operator import itemgetter - -from . 
import _config -from ._compat import ( - PY2, - isclass, - iteritems, - metadata_proxy, - ordered_dict, - set_closure_cell, -) -from .exceptions import ( - DefaultAlreadySetError, - FrozenInstanceError, - NotAnAttrsClassError, - PythonTooOldError, - UnannotatedAttributeError, -) - - -# This is used at least twice, so cache it here. -_obj_setattr = object.__setattr__ -_init_converter_pat = "__attr_converter_{}" -_init_factory_pat = "__attr_factory_{}" -_tuple_property_pat = ( - " {attr_name} = _attrs_property(_attrs_itemgetter({index}))" -) -_classvar_prefixes = ("typing.ClassVar", "t.ClassVar", "ClassVar") -# we don't use a double-underscore prefix because that triggers -# name mangling when trying to create a slot for the field -# (when slots=True) -_hash_cache_field = "_attrs_cached_hash" - -_empty_metadata_singleton = metadata_proxy({}) - - -class _Nothing(object): - """ - Sentinel class to indicate the lack of a value when ``None`` is ambiguous. - - ``_Nothing`` is a singleton. There is only ever one of it. - """ - - _singleton = None - - def __new__(cls): - if _Nothing._singleton is None: - _Nothing._singleton = super(_Nothing, cls).__new__(cls) - return _Nothing._singleton - - def __repr__(self): - return "NOTHING" - - -NOTHING = _Nothing() -""" -Sentinel to indicate the lack of a value when ``None`` is ambiguous. -""" - - -def attrib( - default=NOTHING, - validator=None, - repr=True, - cmp=True, - hash=None, - init=True, - convert=None, - metadata=None, - type=None, - converter=None, - factory=None, - kw_only=False, -): - """ - Create a new attribute on a class. - - .. warning:: - - Does *not* do anything unless the class is also decorated with - :func:`attr.s`! - - :param default: A value that is used if an ``attrs``-generated ``__init__`` - is used and no value is passed while instantiating or the attribute is - excluded using ``init=False``. - - If the value is an instance of :class:`Factory`, its callable will be - used to construct a new value (useful for mutable data types like lists - or dicts). - - If a default is not set (or set manually to ``attr.NOTHING``), a value - *must* be supplied when instantiating; otherwise a :exc:`TypeError` - will be raised. - - The default can also be set using decorator notation as shown below. - - :type default: Any value. - - :param callable factory: Syntactic sugar for - ``default=attr.Factory(callable)``. - - :param validator: :func:`callable` that is called by ``attrs``-generated - ``__init__`` methods after the instance has been initialized. They - receive the initialized instance, the :class:`Attribute`, and the - passed value. - - The return value is *not* inspected so the validator has to throw an - exception itself. - - If a ``list`` is passed, its items are treated as validators and must - all pass. - - Validators can be globally disabled and re-enabled using - :func:`get_run_validators`. - - The validator can also be set using decorator notation as shown below. - - :type validator: ``callable`` or a ``list`` of ``callable``\\ s. - - :param bool repr: Include this attribute in the generated ``__repr__`` - method. - :param bool cmp: Include this attribute in the generated comparison methods - (``__eq__`` et al). - :param hash: Include this attribute in the generated ``__hash__`` - method. If ``None`` (default), mirror *cmp*'s value. This is the - correct behavior according the Python spec. Setting this value to - anything else than ``None`` is *discouraged*. 
- :type hash: ``bool`` or ``None`` - :param bool init: Include this attribute in the generated ``__init__`` - method. It is possible to set this to ``False`` and set a default - value. In that case this attributed is unconditionally initialized - with the specified default value or factory. - :param callable converter: :func:`callable` that is called by - ``attrs``-generated ``__init__`` methods to converter attribute's value - to the desired format. It is given the passed-in value, and the - returned value will be used as the new value of the attribute. The - value is converted before being passed to the validator, if any. - :param metadata: An arbitrary mapping, to be used by third-party - components. See :ref:`extending_metadata`. - :param type: The type of the attribute. In Python 3.6 or greater, the - preferred method to specify the type is using a variable annotation - (see `PEP 526 `_). - This argument is provided for backward compatibility. - Regardless of the approach used, the type will be stored on - ``Attribute.type``. - - Please note that ``attrs`` doesn't do anything with this metadata by - itself. You can use it as part of your own code or for - :doc:`static type checking `. - :param kw_only: Make this attribute keyword-only (Python 3+) - in the generated ``__init__`` (if ``init`` is ``False``, this - parameter is ignored). - - .. versionadded:: 15.2.0 *convert* - .. versionadded:: 16.3.0 *metadata* - .. versionchanged:: 17.1.0 *validator* can be a ``list`` now. - .. versionchanged:: 17.1.0 - *hash* is ``None`` and therefore mirrors *cmp* by default. - .. versionadded:: 17.3.0 *type* - .. deprecated:: 17.4.0 *convert* - .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated - *convert* to achieve consistency with other noun-based arguments. - .. versionadded:: 18.1.0 - ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``. - .. versionadded:: 18.2.0 *kw_only* - """ - if hash is not None and hash is not True and hash is not False: - raise TypeError( - "Invalid value for hash. Must be True, False, or None." - ) - - if convert is not None: - if converter is not None: - raise RuntimeError( - "Can't pass both `convert` and `converter`. " - "Please use `converter` only." - ) - warnings.warn( - "The `convert` argument is deprecated in favor of `converter`. " - "It will be removed after 2019/01.", - DeprecationWarning, - stacklevel=2, - ) - converter = convert - - if factory is not None: - if default is not NOTHING: - raise ValueError( - "The `default` and `factory` arguments are mutually " - "exclusive." - ) - if not callable(factory): - raise ValueError("The `factory` argument must be a callable.") - default = Factory(factory) - - if metadata is None: - metadata = {} - - return _CountingAttr( - default=default, - validator=validator, - repr=repr, - cmp=cmp, - hash=hash, - init=init, - converter=converter, - metadata=metadata, - type=type, - kw_only=kw_only, - ) - - -def _make_attr_tuple_class(cls_name, attr_names): - """ - Create a tuple subclass to hold `Attribute`s for an `attrs` class. - - The subclass is a bare tuple with properties for names. 
- - class MyClassAttributes(tuple): - __slots__ = () - x = property(itemgetter(0)) - """ - attr_class_name = "{}Attributes".format(cls_name) - attr_class_template = [ - "class {}(tuple):".format(attr_class_name), - " __slots__ = ()", - ] - if attr_names: - for i, attr_name in enumerate(attr_names): - attr_class_template.append( - _tuple_property_pat.format(index=i, attr_name=attr_name) - ) - else: - attr_class_template.append(" pass") - globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property} - eval(compile("\n".join(attr_class_template), "", "exec"), globs) - - return globs[attr_class_name] - - -# Tuple class for extracted attributes from a class definition. -# `base_attrs` is a subset of `attrs`. -_Attributes = _make_attr_tuple_class( - "_Attributes", - [ - # all attributes to build dunder methods for - "attrs", - # attributes that have been inherited - "base_attrs", - # map inherited attributes to their originating classes - "base_attrs_map", - ], -) - - -def _is_class_var(annot): - """ - Check whether *annot* is a typing.ClassVar. - - The string comparison hack is used to avoid evaluating all string - annotations which would put attrs-based classes at a performance - disadvantage compared to plain old classes. - """ - return str(annot).startswith(_classvar_prefixes) - - -def _get_annotations(cls): - """ - Get annotations for *cls*. - """ - anns = getattr(cls, "__annotations__", None) - if anns is None: - return {} - - # Verify that the annotations aren't merely inherited. - for base_cls in cls.__mro__[1:]: - if anns is getattr(base_cls, "__annotations__", None): - return {} - - return anns - - -def _counter_getter(e): - """ - Key function for sorting to avoid re-creating a lambda for every class. - """ - return e[1].counter - - -def _transform_attrs(cls, these, auto_attribs, kw_only): - """ - Transform all `_CountingAttr`s on a class into `Attribute`s. - - If *these* is passed, use that and don't look for them on the class. - - Return an `_Attributes`. - """ - cd = cls.__dict__ - anns = _get_annotations(cls) - - if these is not None: - ca_list = [(name, ca) for name, ca in iteritems(these)] - - if not isinstance(these, ordered_dict): - ca_list.sort(key=_counter_getter) - elif auto_attribs is True: - ca_names = { - name - for name, attr in cd.items() - if isinstance(attr, _CountingAttr) - } - ca_list = [] - annot_names = set() - for attr_name, type in anns.items(): - if _is_class_var(type): - continue - annot_names.add(attr_name) - a = cd.get(attr_name, NOTHING) - if not isinstance(a, _CountingAttr): - if a is NOTHING: - a = attrib() - else: - a = attrib(default=a) - ca_list.append((attr_name, a)) - - unannotated = ca_names - annot_names - if len(unannotated) > 0: - raise UnannotatedAttributeError( - "The following `attr.ib`s lack a type annotation: " - + ", ".join( - sorted(unannotated, key=lambda n: cd.get(n).counter) - ) - + "." - ) - else: - ca_list = sorted( - ( - (name, attr) - for name, attr in cd.items() - if isinstance(attr, _CountingAttr) - ), - key=lambda e: e[1].counter, - ) - - own_attrs = [ - Attribute.from_counting_attr( - name=attr_name, ca=ca, type=anns.get(attr_name) - ) - for attr_name, ca in ca_list - ] - - base_attrs = [] - base_attr_map = {} # A dictionary of base attrs to their classes. - taken_attr_names = {a.name: a for a in own_attrs} - - # Traverse the MRO and collect attributes. 
- for base_cls in cls.__mro__[1:-1]: - sub_attrs = getattr(base_cls, "__attrs_attrs__", None) - if sub_attrs is not None: - for a in sub_attrs: - prev_a = taken_attr_names.get(a.name) - # Only add an attribute if it hasn't been defined before. This - # allows for overwriting attribute definitions by subclassing. - if prev_a is None: - base_attrs.append(a) - taken_attr_names[a.name] = a - base_attr_map[a.name] = base_cls - - attr_names = [a.name for a in base_attrs + own_attrs] - - AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) - - if kw_only: - own_attrs = [a._assoc(kw_only=True) for a in own_attrs] - base_attrs = [a._assoc(kw_only=True) for a in base_attrs] - - attrs = AttrsClass(base_attrs + own_attrs) - - had_default = False - was_kw_only = False - for a in attrs: - if ( - was_kw_only is False - and had_default is True - and a.default is NOTHING - and a.init is True - and a.kw_only is False - ): - raise ValueError( - "No mandatory attributes allowed after an attribute with a " - "default value or factory. Attribute in question: %r" % (a,) - ) - elif ( - had_default is False - and a.default is not NOTHING - and a.init is not False - and - # Keyword-only attributes without defaults can be specified - # after keyword-only attributes with defaults. - a.kw_only is False - ): - had_default = True - if was_kw_only is True and a.kw_only is False and a.init is True: - raise ValueError( - "Non keyword-only attributes are not allowed after a " - "keyword-only attribute (unless they are init=False). " - "Attribute in question: {a!r}".format(a=a) - ) - if was_kw_only is False and a.init is True and a.kw_only is True: - was_kw_only = True - - return _Attributes((attrs, base_attrs, base_attr_map)) - - -def _frozen_setattrs(self, name, value): - """ - Attached to frozen classes as __setattr__. - """ - raise FrozenInstanceError() - - -def _frozen_delattrs(self, name): - """ - Attached to frozen classes as __delattr__. - """ - raise FrozenInstanceError() - - -class _ClassBuilder(object): - """ - Iteratively build *one* class. - """ - - __slots__ = ( - "_cls", - "_cls_dict", - "_attrs", - "_base_names", - "_attr_names", - "_slots", - "_frozen", - "_weakref_slot", - "_cache_hash", - "_has_post_init", - "_delete_attribs", - "_base_attr_map", - "_is_exc", - ) - - def __init__( - self, - cls, - these, - slots, - frozen, - weakref_slot, - auto_attribs, - kw_only, - cache_hash, - is_exc, - ): - attrs, base_attrs, base_map = _transform_attrs( - cls, these, auto_attribs, kw_only - ) - - self._cls = cls - self._cls_dict = dict(cls.__dict__) if slots else {} - self._attrs = attrs - self._base_names = set(a.name for a in base_attrs) - self._base_attr_map = base_map - self._attr_names = tuple(a.name for a in attrs) - self._slots = slots - self._frozen = frozen or _has_frozen_base_class(cls) - self._weakref_slot = weakref_slot - self._cache_hash = cache_hash - self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) - self._delete_attribs = not bool(these) - self._is_exc = is_exc - - self._cls_dict["__attrs_attrs__"] = self._attrs - - if frozen: - self._cls_dict["__setattr__"] = _frozen_setattrs - self._cls_dict["__delattr__"] = _frozen_delattrs - - def __repr__(self): - return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__) - - def build_class(self): - """ - Finalize class based on the accumulated configuration. - - Builder cannot be used after calling this method. 
- """ - if self._slots is True: - return self._create_slots_class() - else: - return self._patch_original_class() - - def _patch_original_class(self): - """ - Apply accumulated methods and return the class. - """ - cls = self._cls - base_names = self._base_names - - # Clean class of attribute definitions (`attr.ib()`s). - if self._delete_attribs: - for name in self._attr_names: - if ( - name not in base_names - and getattr(cls, name, None) is not None - ): - try: - delattr(cls, name) - except AttributeError: - # This can happen if a base class defines a class - # variable and we want to set an attribute with the - # same name by using only a type annotation. - pass - - # Attach our dunder methods. - for name, value in self._cls_dict.items(): - setattr(cls, name, value) - - # Attach __setstate__. This is necessary to clear the hash code - # cache on deserialization. See issue - # https://github.com/python-attrs/attrs/issues/482 . - # Note that this code only handles setstate for dict classes. - # For slotted classes, see similar code in _create_slots_class . - if self._cache_hash: - existing_set_state_method = getattr(cls, "__setstate__", None) - if existing_set_state_method: - raise NotImplementedError( - "Currently you cannot use hash caching if " - "you specify your own __setstate__ method." - "See https://github.com/python-attrs/attrs/issues/494 ." - ) - - def cache_hash_set_state(chss_self, _): - # clear hash code cache - setattr(chss_self, _hash_cache_field, None) - - setattr(cls, "__setstate__", cache_hash_set_state) - - return cls - - def _create_slots_class(self): - """ - Build and return a new class with a `__slots__` attribute. - """ - base_names = self._base_names - cd = { - k: v - for k, v in iteritems(self._cls_dict) - if k not in tuple(self._attr_names) + ("__dict__", "__weakref__") - } - - weakref_inherited = False - - # Traverse the MRO to check for an existing __weakref__. - for base_cls in self._cls.__mro__[1:-1]: - if "__weakref__" in getattr(base_cls, "__dict__", ()): - weakref_inherited = True - break - - names = self._attr_names - if ( - self._weakref_slot - and "__weakref__" not in getattr(self._cls, "__slots__", ()) - and "__weakref__" not in names - and not weakref_inherited - ): - names += ("__weakref__",) - - # We only add the names of attributes that aren't inherited. - # Settings __slots__ to inherited attributes wastes memory. - slot_names = [name for name in names if name not in base_names] - if self._cache_hash: - slot_names.append(_hash_cache_field) - cd["__slots__"] = tuple(slot_names) - - qualname = getattr(self._cls, "__qualname__", None) - if qualname is not None: - cd["__qualname__"] = qualname - - # __weakref__ is not writable. - state_attr_names = tuple( - an for an in self._attr_names if an != "__weakref__" - ) - - def slots_getstate(self): - """ - Automatically created by attrs. - """ - return tuple(getattr(self, name) for name in state_attr_names) - - hash_caching_enabled = self._cache_hash - - def slots_setstate(self, state): - """ - Automatically created by attrs. - """ - __bound_setattr = _obj_setattr.__get__(self, Attribute) - for name, value in zip(state_attr_names, state): - __bound_setattr(name, value) - # Clearing the hash code cache on deserialization is needed - # because hash codes can change from run to run. See issue - # https://github.com/python-attrs/attrs/issues/482 . - # Note that this code only handles setstate for slotted classes. - # For dict classes, see similar code in _patch_original_class . 
- if hash_caching_enabled: - __bound_setattr(_hash_cache_field, None) - - # slots and frozen require __getstate__/__setstate__ to work - cd["__getstate__"] = slots_getstate - cd["__setstate__"] = slots_setstate - - # Create new class based on old class and our methods. - cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd) - - # The following is a fix for - # https://github.com/python-attrs/attrs/issues/102. On Python 3, - # if a method mentions `__class__` or uses the no-arg super(), the - # compiler will bake a reference to the class in the method itself - # as `method.__closure__`. Since we replace the class with a - # clone, we rewrite these references so it keeps working. - for item in cls.__dict__.values(): - if isinstance(item, (classmethod, staticmethod)): - # Class- and staticmethods hide their functions inside. - # These might need to be rewritten as well. - closure_cells = getattr(item.__func__, "__closure__", None) - else: - closure_cells = getattr(item, "__closure__", None) - - if not closure_cells: # Catch None or the empty list. - continue - for cell in closure_cells: - if cell.cell_contents is self._cls: - set_closure_cell(cell, cls) - - return cls - - def add_repr(self, ns): - self._cls_dict["__repr__"] = self._add_method_dunders( - _make_repr(self._attrs, ns=ns) - ) - return self - - def add_str(self): - repr = self._cls_dict.get("__repr__") - if repr is None: - raise ValueError( - "__str__ can only be generated if a __repr__ exists." - ) - - def __str__(self): - return self.__repr__() - - self._cls_dict["__str__"] = self._add_method_dunders(__str__) - return self - - def make_unhashable(self): - self._cls_dict["__hash__"] = None - return self - - def add_hash(self): - self._cls_dict["__hash__"] = self._add_method_dunders( - _make_hash( - self._attrs, frozen=self._frozen, cache_hash=self._cache_hash - ) - ) - - return self - - def add_init(self): - self._cls_dict["__init__"] = self._add_method_dunders( - _make_init( - self._attrs, - self._has_post_init, - self._frozen, - self._slots, - self._cache_hash, - self._base_attr_map, - self._is_exc, - ) - ) - - return self - - def add_cmp(self): - cd = self._cls_dict - - cd["__eq__"], cd["__ne__"], cd["__lt__"], cd["__le__"], cd[ - "__gt__" - ], cd["__ge__"] = ( - self._add_method_dunders(meth) for meth in _make_cmp(self._attrs) - ) - - return self - - def _add_method_dunders(self, method): - """ - Add __module__ and __qualname__ to a *method* if possible. - """ - try: - method.__module__ = self._cls.__module__ - except AttributeError: - pass - - try: - method.__qualname__ = ".".join( - (self._cls.__qualname__, method.__name__) - ) - except AttributeError: - pass - - return method - - -def attrs( - maybe_cls=None, - these=None, - repr_ns=None, - repr=True, - cmp=True, - hash=None, - init=True, - slots=False, - frozen=False, - weakref_slot=True, - str=False, - auto_attribs=False, - kw_only=False, - cache_hash=False, - auto_exc=False, -): - r""" - A class decorator that adds `dunder - `_\ -methods according to the - specified attributes using :func:`attr.ib` or the *these* argument. - - :param these: A dictionary of name to :func:`attr.ib` mappings. This is - useful to avoid the definition of your attributes within the class body - because you can't (e.g. if you want to add ``__repr__`` methods to - Django models) or don't want to. - - If *these* is not ``None``, ``attrs`` will *not* search the class body - for attributes and will *not* remove any attributes from it. 
- - If *these* is an ordered dict (:class:`dict` on Python 3.6+, - :class:`collections.OrderedDict` otherwise), the order is deduced from - the order of the attributes inside *these*. Otherwise the order - of the definition of the attributes is used. - - :type these: :class:`dict` of :class:`str` to :func:`attr.ib` - - :param str repr_ns: When using nested classes, there's no way in Python 2 - to automatically detect that. Therefore it's possible to set the - namespace explicitly for a more meaningful ``repr`` output. - :param bool repr: Create a ``__repr__`` method with a human readable - representation of ``attrs`` attributes.. - :param bool str: Create a ``__str__`` method that is identical to - ``__repr__``. This is usually not necessary except for - :class:`Exception`\ s. - :param bool cmp: Create ``__eq__``, ``__ne__``, ``__lt__``, ``__le__``, - ``__gt__``, and ``__ge__`` methods that compare the class as if it were - a tuple of its ``attrs`` attributes. But the attributes are *only* - compared, if the types of both classes are *identical*! - :param hash: If ``None`` (default), the ``__hash__`` method is generated - according how *cmp* and *frozen* are set. - - 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you. - 2. If *cmp* is True and *frozen* is False, ``__hash__`` will be set to - None, marking it unhashable (which it is). - 3. If *cmp* is False, ``__hash__`` will be left untouched meaning the - ``__hash__`` method of the base class will be used (if base class is - ``object``, this means it will fall back to id-based hashing.). - - Although not recommended, you can decide for yourself and force - ``attrs`` to create one (e.g. if the class is immutable even though you - didn't freeze it programmatically) by passing ``True`` or not. Both of - these cases are rather special and should be used carefully. - - See the `Python documentation \ - `_ - and the `GitHub issue that led to the default behavior \ - `_ for more details. - :type hash: ``bool`` or ``None`` - :param bool init: Create a ``__init__`` method that initializes the - ``attrs`` attributes. Leading underscores are stripped for the - argument name. If a ``__attrs_post_init__`` method exists on the - class, it will be called after the class is fully initialized. - :param bool slots: Create a slots_-style class that's more - memory-efficient. See :ref:`slots` for further ramifications. - :param bool frozen: Make instances immutable after initialization. If - someone attempts to modify a frozen instance, - :exc:`attr.exceptions.FrozenInstanceError` is raised. - - Please note: - - 1. This is achieved by installing a custom ``__setattr__`` method - on your class so you can't implement an own one. - - 2. True immutability is impossible in Python. - - 3. This *does* have a minor a runtime performance :ref:`impact - ` when initializing new instances. In other words: - ``__init__`` is slightly slower with ``frozen=True``. - - 4. If a class is frozen, you cannot modify ``self`` in - ``__attrs_post_init__`` or a self-written ``__init__``. You can - circumvent that limitation by using - ``object.__setattr__(self, "attribute_name", value)``. - - .. _slots: https://docs.python.org/3/reference/datamodel.html#slots - :param bool weakref_slot: Make instances weak-referenceable. This has no - effect unless ``slots`` is also enabled. - :param bool auto_attribs: If True, collect `PEP 526`_-annotated attributes - (Python 3.6 and later only) from the class body. - - In this case, you **must** annotate every field. 
If ``attrs`` - encounters a field that is set to an :func:`attr.ib` but lacks a type - annotation, an :exc:`attr.exceptions.UnannotatedAttributeError` is - raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't - want to set a type. - - If you assign a value to those attributes (e.g. ``x: int = 42``), that - value becomes the default value like if it were passed using - ``attr.ib(default=42)``. Passing an instance of :class:`Factory` also - works as expected. - - Attributes annotated as :data:`typing.ClassVar` are **ignored**. - - .. _`PEP 526`: https://www.python.org/dev/peps/pep-0526/ - :param bool kw_only: Make all attributes keyword-only (Python 3+) - in the generated ``__init__`` (if ``init`` is ``False``, this - parameter is ignored). - :param bool cache_hash: Ensure that the object's hash code is computed - only once and stored on the object. If this is set to ``True``, - hashing must be either explicitly or implicitly enabled for this - class. If the hash code is cached, avoid any reassignments of - fields involved in hash code computation or mutations of the objects - those fields point to after object creation. If such changes occur, - the behavior of the object's hash code is undefined. - :param bool auto_exc: If the class subclasses :class:`BaseException` - (which implicitly includes any subclass of any exception), the - following happens to behave like a well-behaved Python exceptions - class: - - - the values for *cmp* and *hash* are ignored and the instances compare - and hash by the instance's ids (N.B. ``attrs`` will *not* remove - existing implementations of ``__hash__`` or the equality methods. It - just won't add own ones.), - - all attributes that are either passed into ``__init__`` or have a - default value are additionally available as a tuple in the ``args`` - attribute, - - the value of *str* is ignored leaving ``__str__`` to base classes. - - .. versionadded:: 16.0.0 *slots* - .. versionadded:: 16.1.0 *frozen* - .. versionadded:: 16.3.0 *str* - .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``. - .. versionchanged:: 17.1.0 - *hash* supports ``None`` as value which is also the default now. - .. versionadded:: 17.3.0 *auto_attribs* - .. versionchanged:: 18.1.0 - If *these* is passed, no attributes are deleted from the class body. - .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained. - .. versionadded:: 18.2.0 *weakref_slot* - .. deprecated:: 18.2.0 - ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a - :class:`DeprecationWarning` if the classes compared are subclasses of - each other. ``__eq`` and ``__ne__`` never tried to compared subclasses - to each other. - .. versionadded:: 18.2.0 *kw_only* - .. versionadded:: 18.2.0 *cache_hash* - .. versionadded:: 19.1.0 *auto_exc* - """ - - def wrap(cls): - - if getattr(cls, "__class__", None) is None: - raise TypeError("attrs only works with new-style classes.") - - is_exc = auto_exc is True and issubclass(cls, BaseException) - - builder = _ClassBuilder( - cls, - these, - slots, - frozen, - weakref_slot, - auto_attribs, - kw_only, - cache_hash, - is_exc, - ) - - if repr is True: - builder.add_repr(repr_ns) - if str is True: - builder.add_str() - if cmp is True and not is_exc: - builder.add_cmp() - - if hash is not True and hash is not False and hash is not None: - # Can't use `hash in` because 1 == True for example. - raise TypeError( - "Invalid value for hash. Must be True, False, or None." 
- ) - elif hash is False or (hash is None and cmp is False): - if cache_hash: - raise TypeError( - "Invalid value for cache_hash. To use hash caching," - " hashing must be either explicitly or implicitly " - "enabled." - ) - elif ( - hash is True - or (hash is None and cmp is True and frozen is True) - and is_exc is False - ): - builder.add_hash() - else: - if cache_hash: - raise TypeError( - "Invalid value for cache_hash. To use hash caching," - " hashing must be either explicitly or implicitly " - "enabled." - ) - builder.make_unhashable() - - if init is True: - builder.add_init() - else: - if cache_hash: - raise TypeError( - "Invalid value for cache_hash. To use hash caching," - " init must be True." - ) - - return builder.build_class() - - # maybe_cls's type depends on the usage of the decorator. It's a class - # if it's used as `@attrs` but ``None`` if used as `@attrs()`. - if maybe_cls is None: - return wrap - else: - return wrap(maybe_cls) - - -_attrs = attrs -""" -Internal alias so we can use it in functions that take an argument called -*attrs*. -""" - - -if PY2: - - def _has_frozen_base_class(cls): - """ - Check whether *cls* has a frozen ancestor by looking at its - __setattr__. - """ - return ( - getattr(cls.__setattr__, "__module__", None) - == _frozen_setattrs.__module__ - and cls.__setattr__.__name__ == _frozen_setattrs.__name__ - ) - - -else: - - def _has_frozen_base_class(cls): - """ - Check whether *cls* has a frozen ancestor by looking at its - __setattr__. - """ - return cls.__setattr__ == _frozen_setattrs - - -def _attrs_to_tuple(obj, attrs): - """ - Create a tuple of all values of *obj*'s *attrs*. - """ - return tuple(getattr(obj, a.name) for a in attrs) - - -def _make_hash(attrs, frozen, cache_hash): - attrs = tuple( - a - for a in attrs - if a.hash is True or (a.hash is None and a.cmp is True) - ) - - tab = " " - - # We cache the generated hash methods for the same kinds of attributes. - sha1 = hashlib.sha1() - sha1.update(repr(attrs).encode("utf-8")) - unique_filename = "" % (sha1.hexdigest(),) - type_hash = hash(unique_filename) - - method_lines = ["def __hash__(self):"] - - def append_hash_computation_lines(prefix, indent): - """ - Generate the code for actually computing the hash code. - Below this will either be returned directly or used to compute - a value which is then cached, depending on the value of cache_hash - """ - method_lines.extend( - [indent + prefix + "hash((", indent + " %d," % (type_hash,)] - ) - - for a in attrs: - method_lines.append(indent + " self.%s," % a.name) - - method_lines.append(indent + " ))") - - if cache_hash: - method_lines.append(tab + "if self.%s is None:" % _hash_cache_field) - if frozen: - append_hash_computation_lines( - "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2 - ) - method_lines.append(tab * 2 + ")") # close __setattr__ - else: - append_hash_computation_lines( - "self.%s = " % _hash_cache_field, tab * 2 - ) - method_lines.append(tab + "return self.%s" % _hash_cache_field) - else: - append_hash_computation_lines("return ", tab) - - script = "\n".join(method_lines) - globs = {} - locs = {} - bytecode = compile(script, unique_filename, "exec") - eval(bytecode, globs, locs) - - # In order of debuggers like PDB being able to step through the code, - # we add a fake linecache entry. - linecache.cache[unique_filename] = ( - len(script), - None, - script.splitlines(True), - unique_filename, - ) - - return locs["__hash__"] - - -def _add_hash(cls, attrs): - """ - Add a hash method to *cls*. 
- """ - cls.__hash__ = _make_hash(attrs, frozen=False, cache_hash=False) - return cls - - -def __ne__(self, other): - """ - Check equality and either forward a NotImplemented or return the result - negated. - """ - result = self.__eq__(other) - if result is NotImplemented: - return NotImplemented - - return not result - - -WARNING_CMP_ISINSTANCE = ( - "Comparision of subclasses using __%s__ is deprecated and will be removed " - "in 2019." -) - - -def _make_cmp(attrs): - attrs = [a for a in attrs if a.cmp] - - # We cache the generated eq methods for the same kinds of attributes. - sha1 = hashlib.sha1() - sha1.update(repr(attrs).encode("utf-8")) - unique_filename = "" % (sha1.hexdigest(),) - lines = [ - "def __eq__(self, other):", - " if other.__class__ is not self.__class__:", - " return NotImplemented", - ] - # We can't just do a big self.x = other.x and... clause due to - # irregularities like nan == nan is false but (nan,) == (nan,) is true. - if attrs: - lines.append(" return (") - others = [" ) == ("] - for a in attrs: - lines.append(" self.%s," % (a.name,)) - others.append(" other.%s," % (a.name,)) - - lines += others + [" )"] - else: - lines.append(" return True") - - script = "\n".join(lines) - globs = {} - locs = {} - bytecode = compile(script, unique_filename, "exec") - eval(bytecode, globs, locs) - - # In order of debuggers like PDB being able to step through the code, - # we add a fake linecache entry. - linecache.cache[unique_filename] = ( - len(script), - None, - script.splitlines(True), - unique_filename, - ) - eq = locs["__eq__"] - ne = __ne__ - - def attrs_to_tuple(obj): - """ - Save us some typing. - """ - return _attrs_to_tuple(obj, attrs) - - def __lt__(self, other): - """ - Automatically created by attrs. - """ - if isinstance(other, self.__class__): - if other.__class__ is not self.__class__: - warnings.warn( - WARNING_CMP_ISINSTANCE % ("lt",), DeprecationWarning - ) - return attrs_to_tuple(self) < attrs_to_tuple(other) - else: - return NotImplemented - - def __le__(self, other): - """ - Automatically created by attrs. - """ - if isinstance(other, self.__class__): - if other.__class__ is not self.__class__: - warnings.warn( - WARNING_CMP_ISINSTANCE % ("le",), DeprecationWarning - ) - return attrs_to_tuple(self) <= attrs_to_tuple(other) - else: - return NotImplemented - - def __gt__(self, other): - """ - Automatically created by attrs. - """ - if isinstance(other, self.__class__): - if other.__class__ is not self.__class__: - warnings.warn( - WARNING_CMP_ISINSTANCE % ("gt",), DeprecationWarning - ) - return attrs_to_tuple(self) > attrs_to_tuple(other) - else: - return NotImplemented - - def __ge__(self, other): - """ - Automatically created by attrs. - """ - if isinstance(other, self.__class__): - if other.__class__ is not self.__class__: - warnings.warn( - WARNING_CMP_ISINSTANCE % ("ge",), DeprecationWarning - ) - return attrs_to_tuple(self) >= attrs_to_tuple(other) - else: - return NotImplemented - - return eq, ne, __lt__, __le__, __gt__, __ge__ - - -def _add_cmp(cls, attrs=None): - """ - Add comparison methods to *cls*. - """ - if attrs is None: - attrs = cls.__attrs_attrs__ - - cls.__eq__, cls.__ne__, cls.__lt__, cls.__le__, cls.__gt__, cls.__ge__ = _make_cmp( # noqa - attrs - ) - - return cls - - -_already_repring = threading.local() - - -def _make_repr(attrs, ns): - """ - Make a repr method for *attr_names* adding *ns* to the full name. - """ - attr_names = tuple(a.name for a in attrs if a.repr) - - def __repr__(self): - """ - Automatically created by attrs. 
- """ - try: - working_set = _already_repring.working_set - except AttributeError: - working_set = set() - _already_repring.working_set = working_set - - if id(self) in working_set: - return "..." - real_cls = self.__class__ - if ns is None: - qualname = getattr(real_cls, "__qualname__", None) - if qualname is not None: - class_name = qualname.rsplit(">.", 1)[-1] - else: - class_name = real_cls.__name__ - else: - class_name = ns + "." + real_cls.__name__ - - # Since 'self' remains on the stack (i.e.: strongly referenced) for the - # duration of this call, it's safe to depend on id(...) stability, and - # not need to track the instance and therefore worry about properties - # like weakref- or hash-ability. - working_set.add(id(self)) - try: - result = [class_name, "("] - first = True - for name in attr_names: - if first: - first = False - else: - result.append(", ") - result.extend((name, "=", repr(getattr(self, name, NOTHING)))) - return "".join(result) + ")" - finally: - working_set.remove(id(self)) - - return __repr__ - - -def _add_repr(cls, ns=None, attrs=None): - """ - Add a repr method to *cls*. - """ - if attrs is None: - attrs = cls.__attrs_attrs__ - - cls.__repr__ = _make_repr(attrs, ns) - return cls - - -def _make_init( - attrs, post_init, frozen, slots, cache_hash, base_attr_map, is_exc -): - attrs = [a for a in attrs if a.init or a.default is not NOTHING] - - # We cache the generated init methods for the same kinds of attributes. - sha1 = hashlib.sha1() - sha1.update(repr(attrs).encode("utf-8")) - unique_filename = "".format(sha1.hexdigest()) - - script, globs, annotations = _attrs_to_init_script( - attrs, frozen, slots, post_init, cache_hash, base_attr_map, is_exc - ) - locs = {} - bytecode = compile(script, unique_filename, "exec") - attr_dict = dict((a.name, a) for a in attrs) - globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) - - if frozen is True: - # Save the lookup overhead in __init__ if we need to circumvent - # immutability. - globs["_cached_setattr"] = _obj_setattr - - eval(bytecode, globs, locs) - - # In order of debuggers like PDB being able to step through the code, - # we add a fake linecache entry. - linecache.cache[unique_filename] = ( - len(script), - None, - script.splitlines(True), - unique_filename, - ) - - __init__ = locs["__init__"] - __init__.__annotations__ = annotations - - return __init__ - - -def fields(cls): - """ - Return the tuple of ``attrs`` attributes for a class. - - The tuple also allows accessing the fields by their names (see below for - examples). - - :param type cls: Class to introspect. - - :raise TypeError: If *cls* is not a class. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - :rtype: tuple (with name accessors) of :class:`attr.Attribute` - - .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields - by name. - """ - if not isclass(cls): - raise TypeError("Passed object must be a class.") - attrs = getattr(cls, "__attrs_attrs__", None) - if attrs is None: - raise NotAnAttrsClassError( - "{cls!r} is not an attrs-decorated class.".format(cls=cls) - ) - return attrs - - -def fields_dict(cls): - """ - Return an ordered dictionary of ``attrs`` attributes for a class, whose - keys are the attribute names. - - :param type cls: Class to introspect. - - :raise TypeError: If *cls* is not a class. - :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` - class. - - :rtype: an ordered dict where keys are attribute names and values are - :class:`attr.Attribute`\\ s. 
This will be a :class:`dict` if it's - naturally ordered like on Python 3.6+ or an - :class:`~collections.OrderedDict` otherwise. - - .. versionadded:: 18.1.0 - """ - if not isclass(cls): - raise TypeError("Passed object must be a class.") - attrs = getattr(cls, "__attrs_attrs__", None) - if attrs is None: - raise NotAnAttrsClassError( - "{cls!r} is not an attrs-decorated class.".format(cls=cls) - ) - return ordered_dict(((a.name, a) for a in attrs)) - - -def validate(inst): - """ - Validate all attributes on *inst* that have a validator. - - Leaves all exceptions through. - - :param inst: Instance of a class with ``attrs`` attributes. - """ - if _config._run_validators is False: - return - - for a in fields(inst.__class__): - v = a.validator - if v is not None: - v(inst, a, getattr(inst, a.name)) - - -def _is_slot_cls(cls): - return "__slots__" in cls.__dict__ - - -def _is_slot_attr(a_name, base_attr_map): - """ - Check if the attribute name comes from a slot class. - """ - return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name]) - - -def _attrs_to_init_script( - attrs, frozen, slots, post_init, cache_hash, base_attr_map, is_exc -): - """ - Return a script of an initializer for *attrs* and a dict of globals. - - The globals are expected by the generated script. - - If *frozen* is True, we cannot set the attributes directly so we use - a cached ``object.__setattr__``. - """ - lines = [] - any_slot_ancestors = any( - _is_slot_attr(a.name, base_attr_map) for a in attrs - ) - if frozen is True: - if slots is True: - lines.append( - # Circumvent the __setattr__ descriptor to save one lookup per - # assignment. - # Note _setattr will be used again below if cache_hash is True - "_setattr = _cached_setattr.__get__(self, self.__class__)" - ) - - def fmt_setter(attr_name, value_var): - return "_setattr('%(attr_name)s', %(value_var)s)" % { - "attr_name": attr_name, - "value_var": value_var, - } - - def fmt_setter_with_converter(attr_name, value_var): - conv_name = _init_converter_pat.format(attr_name) - return "_setattr('%(attr_name)s', %(conv)s(%(value_var)s))" % { - "attr_name": attr_name, - "value_var": value_var, - "conv": conv_name, - } - - else: - # Dict frozen classes assign directly to __dict__. - # But only if the attribute doesn't come from an ancestor slot - # class. - # Note _inst_dict will be used again below if cache_hash is True - lines.append("_inst_dict = self.__dict__") - if any_slot_ancestors: - lines.append( - # Circumvent the __setattr__ descriptor to save one lookup - # per assignment. - "_setattr = _cached_setattr.__get__(self, self.__class__)" - ) - - def fmt_setter(attr_name, value_var): - if _is_slot_attr(attr_name, base_attr_map): - res = "_setattr('%(attr_name)s', %(value_var)s)" % { - "attr_name": attr_name, - "value_var": value_var, - } - else: - res = "_inst_dict['%(attr_name)s'] = %(value_var)s" % { - "attr_name": attr_name, - "value_var": value_var, - } - return res - - def fmt_setter_with_converter(attr_name, value_var): - conv_name = _init_converter_pat.format(attr_name) - if _is_slot_attr(attr_name, base_attr_map): - tmpl = "_setattr('%(attr_name)s', %(c)s(%(value_var)s))" - else: - tmpl = "_inst_dict['%(attr_name)s'] = %(c)s(%(value_var)s)" - return tmpl % { - "attr_name": attr_name, - "value_var": value_var, - "c": conv_name, - } - - else: - # Not frozen. 
- def fmt_setter(attr_name, value): - return "self.%(attr_name)s = %(value)s" % { - "attr_name": attr_name, - "value": value, - } - - def fmt_setter_with_converter(attr_name, value_var): - conv_name = _init_converter_pat.format(attr_name) - return "self.%(attr_name)s = %(conv)s(%(value_var)s)" % { - "attr_name": attr_name, - "value_var": value_var, - "conv": conv_name, - } - - args = [] - kw_only_args = [] - attrs_to_validate = [] - - # This is a dictionary of names to validator and converter callables. - # Injecting this into __init__ globals lets us avoid lookups. - names_for_globals = {} - annotations = {"return": None} - - for a in attrs: - if a.validator: - attrs_to_validate.append(a) - attr_name = a.name - arg_name = a.name.lstrip("_") - has_factory = isinstance(a.default, Factory) - if has_factory and a.default.takes_self: - maybe_self = "self" - else: - maybe_self = "" - if a.init is False: - if has_factory: - init_factory_name = _init_factory_pat.format(a.name) - if a.converter is not None: - lines.append( - fmt_setter_with_converter( - attr_name, - init_factory_name + "({0})".format(maybe_self), - ) - ) - conv_name = _init_converter_pat.format(a.name) - names_for_globals[conv_name] = a.converter - else: - lines.append( - fmt_setter( - attr_name, - init_factory_name + "({0})".format(maybe_self), - ) - ) - names_for_globals[init_factory_name] = a.default.factory - else: - if a.converter is not None: - lines.append( - fmt_setter_with_converter( - attr_name, - "attr_dict['{attr_name}'].default".format( - attr_name=attr_name - ), - ) - ) - conv_name = _init_converter_pat.format(a.name) - names_for_globals[conv_name] = a.converter - else: - lines.append( - fmt_setter( - attr_name, - "attr_dict['{attr_name}'].default".format( - attr_name=attr_name - ), - ) - ) - elif a.default is not NOTHING and not has_factory: - arg = "{arg_name}=attr_dict['{attr_name}'].default".format( - arg_name=arg_name, attr_name=attr_name - ) - if a.kw_only: - kw_only_args.append(arg) - else: - args.append(arg) - if a.converter is not None: - lines.append(fmt_setter_with_converter(attr_name, arg_name)) - names_for_globals[ - _init_converter_pat.format(a.name) - ] = a.converter - else: - lines.append(fmt_setter(attr_name, arg_name)) - elif has_factory: - arg = "{arg_name}=NOTHING".format(arg_name=arg_name) - if a.kw_only: - kw_only_args.append(arg) - else: - args.append(arg) - lines.append( - "if {arg_name} is not NOTHING:".format(arg_name=arg_name) - ) - init_factory_name = _init_factory_pat.format(a.name) - if a.converter is not None: - lines.append( - " " + fmt_setter_with_converter(attr_name, arg_name) - ) - lines.append("else:") - lines.append( - " " - + fmt_setter_with_converter( - attr_name, - init_factory_name + "({0})".format(maybe_self), - ) - ) - names_for_globals[ - _init_converter_pat.format(a.name) - ] = a.converter - else: - lines.append(" " + fmt_setter(attr_name, arg_name)) - lines.append("else:") - lines.append( - " " - + fmt_setter( - attr_name, - init_factory_name + "({0})".format(maybe_self), - ) - ) - names_for_globals[init_factory_name] = a.default.factory - else: - if a.kw_only: - kw_only_args.append(arg_name) - else: - args.append(arg_name) - if a.converter is not None: - lines.append(fmt_setter_with_converter(attr_name, arg_name)) - names_for_globals[ - _init_converter_pat.format(a.name) - ] = a.converter - else: - lines.append(fmt_setter(attr_name, arg_name)) - - if a.init is True and a.converter is None and a.type is not None: - annotations[arg_name] = a.type - - if attrs_to_validate: 
# we can skip this if there are no validators. - names_for_globals["_config"] = _config - lines.append("if _config._run_validators is True:") - for a in attrs_to_validate: - val_name = "__attr_validator_{}".format(a.name) - attr_name = "__attr_{}".format(a.name) - lines.append( - " {}(self, {}, self.{})".format(val_name, attr_name, a.name) - ) - names_for_globals[val_name] = a.validator - names_for_globals[attr_name] = a - if post_init: - lines.append("self.__attrs_post_init__()") - - # because this is set only after __attrs_post_init is called, a crash - # will result if post-init tries to access the hash code. This seemed - # preferable to setting this beforehand, in which case alteration to - # field values during post-init combined with post-init accessing the - # hash code would result in silent bugs. - if cache_hash: - if frozen: - if slots: - # if frozen and slots, then _setattr defined above - init_hash_cache = "_setattr('%s', %s)" - else: - # if frozen and not slots, then _inst_dict defined above - init_hash_cache = "_inst_dict['%s'] = %s" - else: - init_hash_cache = "self.%s = %s" - lines.append(init_hash_cache % (_hash_cache_field, "None")) - - # For exceptions we rely on BaseException.__init__ for proper - # initialization. - if is_exc: - vals = ",".join("self." + a.name for a in attrs if a.init) - - lines.append("BaseException.__init__(self, %s)" % (vals,)) - - args = ", ".join(args) - if kw_only_args: - if PY2: - raise PythonTooOldError( - "Keyword-only arguments only work on Python 3 and later." - ) - - args += "{leading_comma}*, {kw_only_args}".format( - leading_comma=", " if args else "", - kw_only_args=", ".join(kw_only_args), - ) - return ( - """\ -def __init__(self, {args}): - {lines} -""".format( - args=args, lines="\n ".join(lines) if lines else "pass" - ), - names_for_globals, - annotations, - ) - - -class Attribute(object): - """ - *Read-only* representation of an attribute. - - :attribute name: The name of the attribute. - - Plus *all* arguments of :func:`attr.ib`. - - For the version history of the fields, see :func:`attr.ib`. - """ - - __slots__ = ( - "name", - "default", - "validator", - "repr", - "cmp", - "hash", - "init", - "metadata", - "type", - "converter", - "kw_only", - ) - - def __init__( - self, - name, - default, - validator, - repr, - cmp, - hash, - init, - convert=None, - metadata=None, - type=None, - converter=None, - kw_only=False, - ): - # Cache this descriptor here to speed things up later. - bound_setattr = _obj_setattr.__get__(self, Attribute) - - # Despite the big red warning, people *do* instantiate `Attribute` - # themselves. - if convert is not None: - if converter is not None: - raise RuntimeError( - "Can't pass both `convert` and `converter`. " - "Please use `converter` only." - ) - warnings.warn( - "The `convert` argument is deprecated in favor of `converter`." 
- " It will be removed after 2019/01.", - DeprecationWarning, - stacklevel=2, - ) - converter = convert - - bound_setattr("name", name) - bound_setattr("default", default) - bound_setattr("validator", validator) - bound_setattr("repr", repr) - bound_setattr("cmp", cmp) - bound_setattr("hash", hash) - bound_setattr("init", init) - bound_setattr("converter", converter) - bound_setattr( - "metadata", - ( - metadata_proxy(metadata) - if metadata - else _empty_metadata_singleton - ), - ) - bound_setattr("type", type) - bound_setattr("kw_only", kw_only) - - def __setattr__(self, name, value): - raise FrozenInstanceError() - - @property - def convert(self): - warnings.warn( - "The `convert` attribute is deprecated in favor of `converter`. " - "It will be removed after 2019/01.", - DeprecationWarning, - stacklevel=2, - ) - return self.converter - - @classmethod - def from_counting_attr(cls, name, ca, type=None): - # type holds the annotated value. deal with conflicts: - if type is None: - type = ca.type - elif ca.type is not None: - raise ValueError( - "Type annotation and type argument cannot both be present" - ) - inst_dict = { - k: getattr(ca, k) - for k in Attribute.__slots__ - if k - not in ( - "name", - "validator", - "default", - "type", - "convert", - ) # exclude methods and deprecated alias - } - return cls( - name=name, - validator=ca._validator, - default=ca._default, - type=type, - **inst_dict - ) - - # Don't use attr.assoc since fields(Attribute) doesn't work - def _assoc(self, **changes): - """ - Copy *self* and apply *changes*. - """ - new = copy.copy(self) - - new._setattrs(changes.items()) - - return new - - # Don't use _add_pickle since fields(Attribute) doesn't work - def __getstate__(self): - """ - Play nice with pickle. - """ - return tuple( - getattr(self, name) if name != "metadata" else dict(self.metadata) - for name in self.__slots__ - ) - - def __setstate__(self, state): - """ - Play nice with pickle. - """ - self._setattrs(zip(self.__slots__, state)) - - def _setattrs(self, name_values_pairs): - bound_setattr = _obj_setattr.__get__(self, Attribute) - for name, value in name_values_pairs: - if name != "metadata": - bound_setattr(name, value) - else: - bound_setattr( - name, - metadata_proxy(value) - if value - else _empty_metadata_singleton, - ) - - -_a = [ - Attribute( - name=name, - default=NOTHING, - validator=None, - repr=True, - cmp=True, - hash=(name != "metadata"), - init=True, - ) - for name in Attribute.__slots__ - if name != "convert" # XXX: remove once `convert` is gone -] - -Attribute = _add_hash( - _add_cmp(_add_repr(Attribute, attrs=_a), attrs=_a), - attrs=[a for a in _a if a.hash], -) - - -class _CountingAttr(object): - """ - Intermediate representation of attributes that uses a counter to preserve - the order in which the attributes have been defined. - - *Internal* data structure of the attrs library. Running into is most - likely the result of a bug like a forgotten `@attr.s` decorator. 
- """ - - __slots__ = ( - "counter", - "_default", - "repr", - "cmp", - "hash", - "init", - "metadata", - "_validator", - "converter", - "type", - "kw_only", - ) - __attrs_attrs__ = tuple( - Attribute( - name=name, - default=NOTHING, - validator=None, - repr=True, - cmp=True, - hash=True, - init=True, - kw_only=False, - ) - for name in ("counter", "_default", "repr", "cmp", "hash", "init") - ) + ( - Attribute( - name="metadata", - default=None, - validator=None, - repr=True, - cmp=True, - hash=False, - init=True, - kw_only=False, - ), - ) - cls_counter = 0 - - def __init__( - self, - default, - validator, - repr, - cmp, - hash, - init, - converter, - metadata, - type, - kw_only, - ): - _CountingAttr.cls_counter += 1 - self.counter = _CountingAttr.cls_counter - self._default = default - # If validator is a list/tuple, wrap it using helper validator. - if validator and isinstance(validator, (list, tuple)): - self._validator = and_(*validator) - else: - self._validator = validator - self.repr = repr - self.cmp = cmp - self.hash = hash - self.init = init - self.converter = converter - self.metadata = metadata - self.type = type - self.kw_only = kw_only - - def validator(self, meth): - """ - Decorator that adds *meth* to the list of validators. - - Returns *meth* unchanged. - - .. versionadded:: 17.1.0 - """ - if self._validator is None: - self._validator = meth - else: - self._validator = and_(self._validator, meth) - return meth - - def default(self, meth): - """ - Decorator that allows to set the default for an attribute. - - Returns *meth* unchanged. - - :raises DefaultAlreadySetError: If default has been set before. - - .. versionadded:: 17.1.0 - """ - if self._default is not NOTHING: - raise DefaultAlreadySetError() - - self._default = Factory(meth, takes_self=True) - - return meth - - -_CountingAttr = _add_cmp(_add_repr(_CountingAttr)) - - -@attrs(slots=True, init=False, hash=True) -class Factory(object): - """ - Stores a factory callable. - - If passed as the default value to :func:`attr.ib`, the factory is used to - generate a new value. - - :param callable factory: A callable that takes either none or exactly one - mandatory positional argument depending on *takes_self*. - :param bool takes_self: Pass the partially initialized instance that is - being initialized as a positional argument. - - .. versionadded:: 17.1.0 *takes_self* - """ - - factory = attrib() - takes_self = attrib() - - def __init__(self, factory, takes_self=False): - """ - `Factory` is part of the default machinery so if we want a default - value here, we have to implement it ourselves. - """ - self.factory = factory - self.takes_self = takes_self - - -def make_class(name, attrs, bases=(object,), **attributes_arguments): - """ - A quick way to create a new class called *name* with *attrs*. - - :param name: The name for the new class. - :type name: str - - :param attrs: A list of names or a dictionary of mappings of names to - attributes. - - If *attrs* is a list or an ordered dict (:class:`dict` on Python 3.6+, - :class:`collections.OrderedDict` otherwise), the order is deduced from - the order of the names or attributes inside *attrs*. Otherwise the - order of the definition of the attributes is used. - :type attrs: :class:`list` or :class:`dict` - - :param tuple bases: Classes that the new class will subclass. - - :param attributes_arguments: Passed unmodified to :func:`attr.s`. - - :return: A new class with *attrs*. - :rtype: type - - .. versionadded:: 17.1.0 *bases* - .. 
versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. - """ - if isinstance(attrs, dict): - cls_dict = attrs - elif isinstance(attrs, (list, tuple)): - cls_dict = dict((a, attrib()) for a in attrs) - else: - raise TypeError("attrs argument must be a dict or a list.") - - post_init = cls_dict.pop("__attrs_post_init__", None) - type_ = type( - name, - bases, - {} if post_init is None else {"__attrs_post_init__": post_init}, - ) - # For pickling to work, the __module__ variable needs to be set to the - # frame where the class is created. Bypass this step in environments where - # sys._getframe is not defined (Jython for example) or sys._getframe is not - # defined for arguments greater than 0 (IronPython). - try: - type_.__module__ = sys._getframe(1).f_globals.get( - "__name__", "__main__" - ) - except (AttributeError, ValueError): - pass - - return _attrs(these=cls_dict, **attributes_arguments)(type_) - - -# These are required by within this module so we define them here and merely -# import into .validators. - - -@attrs(slots=True, hash=True) -class _AndValidator(object): - """ - Compose many validators to a single one. - """ - - _validators = attrib() - - def __call__(self, inst, attr, value): - for v in self._validators: - v(inst, attr, value) - - -def and_(*validators): - """ - A validator that composes multiple validators into one. - - When called on a value, it runs all wrapped validators. - - :param validators: Arbitrary number of validators. - :type validators: callables - - .. versionadded:: 17.1.0 - """ - vals = [] - for validator in validators: - vals.extend( - validator._validators - if isinstance(validator, _AndValidator) - else [validator] - ) - - return _AndValidator(tuple(vals)) diff --git a/src/rez/vendor/attr/converters.py b/src/rez/vendor/attr/converters.py deleted file mode 100644 index 37c4a07a0..000000000 --- a/src/rez/vendor/attr/converters.py +++ /dev/null @@ -1,78 +0,0 @@ -""" -Commonly useful converters. -""" - -from __future__ import absolute_import, division, print_function - -from ._make import NOTHING, Factory - - -def optional(converter): - """ - A converter that allows an attribute to be optional. An optional attribute - is one which can be set to ``None``. - - :param callable converter: the converter that is used for non-``None`` - values. - - .. versionadded:: 17.1.0 - """ - - def optional_converter(val): - if val is None: - return None - return converter(val) - - return optional_converter - - -def default_if_none(default=NOTHING, factory=None): - """ - A converter that allows to replace ``None`` values by *default* or the - result of *factory*. - - :param default: Value to be used if ``None`` is passed. Passing an instance - of :class:`attr.Factory` is supported, however the ``takes_self`` option - is *not*. - :param callable factory: A callable that takes not parameters whose result - is used if ``None`` is passed. - - :raises TypeError: If **neither** *default* or *factory* is passed. - :raises TypeError: If **both** *default* and *factory* are passed. - :raises ValueError: If an instance of :class:`attr.Factory` is passed with - ``takes_self=True``. - - .. versionadded:: 18.2.0 - """ - if default is NOTHING and factory is None: - raise TypeError("Must pass either `default` or `factory`.") - - if default is not NOTHING and factory is not None: - raise TypeError( - "Must pass either `default` or `factory` but not both." 
- ) - - if factory is not None: - default = Factory(factory) - - if isinstance(default, Factory): - if default.takes_self: - raise ValueError( - "`takes_self` is not supported by default_if_none." - ) - - def default_if_none_converter(val): - if val is not None: - return val - - return default.factory() - - else: - - def default_if_none_converter(val): - if val is not None: - return val - - return default - - return default_if_none_converter diff --git a/src/rez/vendor/attr/converters.pyi b/src/rez/vendor/attr/converters.pyi deleted file mode 100644 index 63b2a3866..000000000 --- a/src/rez/vendor/attr/converters.pyi +++ /dev/null @@ -1,12 +0,0 @@ -from typing import TypeVar, Optional, Callable, overload -from . import _ConverterType - -_T = TypeVar("_T") - -def optional( - converter: _ConverterType[_T] -) -> _ConverterType[Optional[_T]]: ... -@overload -def default_if_none(default: _T) -> _ConverterType[_T]: ... -@overload -def default_if_none(*, factory: Callable[[], _T]) -> _ConverterType[_T]: ... diff --git a/src/rez/vendor/attr/exceptions.py b/src/rez/vendor/attr/exceptions.py deleted file mode 100644 index b12e41e97..000000000 --- a/src/rez/vendor/attr/exceptions.py +++ /dev/null @@ -1,57 +0,0 @@ -from __future__ import absolute_import, division, print_function - - -class FrozenInstanceError(AttributeError): - """ - A frozen/immutable instance has been attempted to be modified. - - It mirrors the behavior of ``namedtuples`` by using the same error message - and subclassing :exc:`AttributeError`. - - .. versionadded:: 16.1.0 - """ - - msg = "can't set attribute" - args = [msg] - - -class AttrsAttributeNotFoundError(ValueError): - """ - An ``attrs`` function couldn't find an attribute that the user asked for. - - .. versionadded:: 16.2.0 - """ - - -class NotAnAttrsClassError(ValueError): - """ - A non-``attrs`` class has been passed into an ``attrs`` function. - - .. versionadded:: 16.2.0 - """ - - -class DefaultAlreadySetError(RuntimeError): - """ - A default has been set using ``attr.ib()`` and is attempted to be reset - using the decorator. - - .. versionadded:: 17.1.0 - """ - - -class UnannotatedAttributeError(RuntimeError): - """ - A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type - annotation. - - .. versionadded:: 17.3.0 - """ - - -class PythonTooOldError(RuntimeError): - """ - An ``attrs`` feature requiring a more recent python version has been used. - - .. versionadded:: 18.2.0 - """ diff --git a/src/rez/vendor/attr/exceptions.pyi b/src/rez/vendor/attr/exceptions.pyi deleted file mode 100644 index 48fffcc1e..000000000 --- a/src/rez/vendor/attr/exceptions.pyi +++ /dev/null @@ -1,7 +0,0 @@ -class FrozenInstanceError(AttributeError): - msg: str = ... - -class AttrsAttributeNotFoundError(ValueError): ... -class NotAnAttrsClassError(ValueError): ... -class DefaultAlreadySetError(RuntimeError): ... -class UnannotatedAttributeError(RuntimeError): ... diff --git a/src/rez/vendor/attr/filters.py b/src/rez/vendor/attr/filters.py deleted file mode 100644 index f1c69b8ba..000000000 --- a/src/rez/vendor/attr/filters.py +++ /dev/null @@ -1,52 +0,0 @@ -""" -Commonly useful filters for :func:`attr.asdict`. -""" - -from __future__ import absolute_import, division, print_function - -from ._compat import isclass -from ._make import Attribute - - -def _split_what(what): - """ - Returns a tuple of `frozenset`s of classes and attributes. 
- """ - return ( - frozenset(cls for cls in what if isclass(cls)), - frozenset(cls for cls in what if isinstance(cls, Attribute)), - ) - - -def include(*what): - """ - Whitelist *what*. - - :param what: What to whitelist. - :type what: :class:`list` of :class:`type` or :class:`attr.Attribute`\\ s - - :rtype: :class:`callable` - """ - cls, attrs = _split_what(what) - - def include_(attribute, value): - return value.__class__ in cls or attribute in attrs - - return include_ - - -def exclude(*what): - """ - Blacklist *what*. - - :param what: What to blacklist. - :type what: :class:`list` of classes or :class:`attr.Attribute`\\ s. - - :rtype: :class:`callable` - """ - cls, attrs = _split_what(what) - - def exclude_(attribute, value): - return value.__class__ not in cls and attribute not in attrs - - return exclude_ diff --git a/src/rez/vendor/attr/filters.pyi b/src/rez/vendor/attr/filters.pyi deleted file mode 100644 index 68368fe2b..000000000 --- a/src/rez/vendor/attr/filters.pyi +++ /dev/null @@ -1,5 +0,0 @@ -from typing import Union, Any -from . import Attribute, _FilterType - -def include(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ... -def exclude(*what: Union[type, Attribute[Any]]) -> _FilterType[Any]: ... diff --git a/src/rez/vendor/attr/validators.py b/src/rez/vendor/attr/validators.py deleted file mode 100644 index 7fc4446be..000000000 --- a/src/rez/vendor/attr/validators.py +++ /dev/null @@ -1,282 +0,0 @@ -""" -Commonly useful validators. -""" - -from __future__ import absolute_import, division, print_function - -from ._make import _AndValidator, and_, attrib, attrs - - -__all__ = ["and_", "in_", "instance_of", "optional", "provides"] - - -@attrs(repr=False, slots=True, hash=True) -class _InstanceOfValidator(object): - type = attrib() - - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. - """ - if not isinstance(value, self.type): - raise TypeError( - "'{name}' must be {type!r} (got {value!r} that is a " - "{actual!r}).".format( - name=attr.name, - type=self.type, - actual=value.__class__, - value=value, - ), - attr, - self.type, - value, - ) - - def __repr__(self): - return "".format( - type=self.type - ) - - -def instance_of(type): - """ - A validator that raises a :exc:`TypeError` if the initializer is called - with a wrong type for this particular attribute (checks are performed using - :func:`isinstance` therefore it's also valid to pass a tuple of types). - - :param type: The type to check for. - :type type: type or tuple of types - - :raises TypeError: With a human readable error message, the attribute - (of type :class:`attr.Attribute`), the expected type, and the value it - got. - """ - return _InstanceOfValidator(type) - - -@attrs(repr=False, slots=True, hash=True) -class _ProvidesValidator(object): - interface = attrib() - - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. 
- """ - if not self.interface.providedBy(value): - raise TypeError( - "'{name}' must provide {interface!r} which {value!r} " - "doesn't.".format( - name=attr.name, interface=self.interface, value=value - ), - attr, - self.interface, - value, - ) - - def __repr__(self): - return "".format( - interface=self.interface - ) - - -def provides(interface): - """ - A validator that raises a :exc:`TypeError` if the initializer is called - with an object that does not provide the requested *interface* (checks are - performed using ``interface.providedBy(value)`` (see `zope.interface - `_). - - :param zope.interface.Interface interface: The interface to check for. - - :raises TypeError: With a human readable error message, the attribute - (of type :class:`attr.Attribute`), the expected interface, and the - value it got. - """ - return _ProvidesValidator(interface) - - -@attrs(repr=False, slots=True, hash=True) -class _OptionalValidator(object): - validator = attrib() - - def __call__(self, inst, attr, value): - if value is None: - return - - self.validator(inst, attr, value) - - def __repr__(self): - return "".format( - what=repr(self.validator) - ) - - -def optional(validator): - """ - A validator that makes an attribute optional. An optional attribute is one - which can be set to ``None`` in addition to satisfying the requirements of - the sub-validator. - - :param validator: A validator (or a list of validators) that is used for - non-``None`` values. - :type validator: callable or :class:`list` of callables. - - .. versionadded:: 15.1.0 - .. versionchanged:: 17.1.0 *validator* can be a list of validators. - """ - if isinstance(validator, list): - return _OptionalValidator(_AndValidator(validator)) - return _OptionalValidator(validator) - - -@attrs(repr=False, slots=True, hash=True) -class _InValidator(object): - options = attrib() - - def __call__(self, inst, attr, value): - try: - in_options = value in self.options - except TypeError: # e.g. `1 in "abc"` - in_options = False - - if not in_options: - raise ValueError( - "'{name}' must be in {options!r} (got {value!r})".format( - name=attr.name, options=self.options, value=value - ) - ) - - def __repr__(self): - return "".format( - options=self.options - ) - - -def in_(options): - """ - A validator that raises a :exc:`ValueError` if the initializer is called - with a value that does not belong in the options provided. The check is - performed using ``value in options``. - - :param options: Allowed options. - :type options: list, tuple, :class:`enum.Enum`, ... - - :raises ValueError: With a human readable error message, the attribute (of - type :class:`attr.Attribute`), the expected options, and the value it - got. - - .. versionadded:: 17.1.0 - """ - return _InValidator(options) - - -@attrs(repr=False, slots=False, hash=True) -class _IsCallableValidator(object): - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. - """ - if not callable(value): - raise TypeError("'{name}' must be callable".format(name=attr.name)) - - def __repr__(self): - return "" - - -def is_callable(): - """ - A validator that raises a :class:`TypeError` if the initializer is called - with a value for this particular attribute that is not callable. - - .. versionadded:: 19.1.0 - - :raises TypeError: With a human readable error message containing the - attribute (of type :class:`attr.Attribute`) name. 
- """ - return _IsCallableValidator() - - -@attrs(repr=False, slots=True, hash=True) -class _DeepIterable(object): - member_validator = attrib(validator=is_callable()) - iterable_validator = attrib( - default=None, validator=optional(is_callable()) - ) - - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. - """ - if self.iterable_validator is not None: - self.iterable_validator(inst, attr, value) - - for member in value: - self.member_validator(inst, attr, member) - - def __repr__(self): - iterable_identifier = ( - "" - if self.iterable_validator is None - else " {iterable!r}".format(iterable=self.iterable_validator) - ) - return ( - "" - ).format( - iterable_identifier=iterable_identifier, - member=self.member_validator, - ) - - -def deep_iterable(member_validator, iterable_validator=None): - """ - A validator that performs deep validation of an iterable. - - :param member_validator: Validator to apply to iterable members - :param iterable_validator: Validator to apply to iterable itself - (optional) - - .. versionadded:: 19.1.0 - - :raises TypeError: if any sub-validators fail - """ - return _DeepIterable(member_validator, iterable_validator) - - -@attrs(repr=False, slots=True, hash=True) -class _DeepMapping(object): - key_validator = attrib(validator=is_callable()) - value_validator = attrib(validator=is_callable()) - mapping_validator = attrib(default=None, validator=optional(is_callable())) - - def __call__(self, inst, attr, value): - """ - We use a callable class to be able to change the ``__repr__``. - """ - if self.mapping_validator is not None: - self.mapping_validator(inst, attr, value) - - for key in value: - self.key_validator(inst, attr, key) - self.value_validator(inst, attr, value[key]) - - def __repr__(self): - return ( - "" - ).format(key=self.key_validator, value=self.value_validator) - - -def deep_mapping(key_validator, value_validator, mapping_validator=None): - """ - A validator that performs deep validation of a dictionary. - - :param key_validator: Validator to apply to dictionary keys - :param value_validator: Validator to apply to dictionary values - :param mapping_validator: Validator to apply to top-level mapping - attribute (optional) - - .. versionadded:: 19.1.0 - - :raises TypeError: if any sub-validators fail - """ - return _DeepMapping(key_validator, value_validator, mapping_validator) diff --git a/src/rez/vendor/attr/validators.pyi b/src/rez/vendor/attr/validators.pyi deleted file mode 100644 index 01af06845..000000000 --- a/src/rez/vendor/attr/validators.pyi +++ /dev/null @@ -1,24 +0,0 @@ -from typing import Container, List, Union, TypeVar, Type, Any, Optional, Tuple -from . import _ValidatorType - -_T = TypeVar("_T") - -def instance_of( - type: Union[Tuple[Type[_T], ...], Type[_T]] -) -> _ValidatorType[_T]: ... -def provides(interface: Any) -> _ValidatorType[Any]: ... -def optional( - validator: Union[_ValidatorType[_T], List[_ValidatorType[_T]]] -) -> _ValidatorType[Optional[_T]]: ... -def in_(options: Container[_T]) -> _ValidatorType[_T]: ... -def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... -def deep_iterable( - member_validator: _ValidatorType[_T], - iterable_validator: Optional[_ValidatorType[_T]], -) -> _ValidatorType[_T]: ... -def deep_mapping( - key_validator: _ValidatorType[_T], - value_validator: _ValidatorType[_T], - mapping_validator: Optional[_ValidatorType[_T]], -) -> _ValidatorType[_T]: ... -def is_callable() -> _ValidatorType[_T]: ... 
diff --git a/src/rez/vendor/colorama/__init__.py b/src/rez/vendor/colorama/__init__.py index 2a3bf4714..383101cdb 100644 --- a/src/rez/vendor/colorama/__init__.py +++ b/src/rez/vendor/colorama/__init__.py @@ -1,6 +1,7 @@ # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. -from .initialise import init, deinit, reinit, colorama_text +from .initialise import init, deinit, reinit, colorama_text, just_fix_windows_console from .ansi import Fore, Back, Style, Cursor from .ansitowin32 import AnsiToWin32 -__version__ = '0.4.1' +__version__ = '0.4.6' + diff --git a/src/rez/vendor/colorama/ansi.py b/src/rez/vendor/colorama/ansi.py index 78776588d..11ec695ff 100644 --- a/src/rez/vendor/colorama/ansi.py +++ b/src/rez/vendor/colorama/ansi.py @@ -6,7 +6,7 @@ CSI = '\033[' OSC = '\033]' -BEL = '\007' +BEL = '\a' def code_to_chars(code): diff --git a/src/rez/vendor/colorama/ansitowin32.py b/src/rez/vendor/colorama/ansitowin32.py index 359c92be5..abf209e60 100644 --- a/src/rez/vendor/colorama/ansitowin32.py +++ b/src/rez/vendor/colorama/ansitowin32.py @@ -3,8 +3,8 @@ import sys import os -from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style -from .winterm import WinTerm, WinColor, WinStyle +from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style, BEL +from .winterm import enable_vt_processing, WinTerm, WinColor, WinStyle from .win32 import windll, winapi_test @@ -37,6 +37,12 @@ def __enter__(self, *args, **kwargs): def __exit__(self, *args, **kwargs): return self.__wrapped.__exit__(*args, **kwargs) + def __setstate__(self, state): + self.__dict__ = state + + def __getstate__(self): + return self.__dict__ + def write(self, text): self.__convertor.write(text) @@ -57,7 +63,9 @@ def closed(self): stream = self.__wrapped try: return stream.closed - except AttributeError: + # AttributeError in the case that the stream doesn't support being closed + # ValueError for the case that the stream has already been detached when atexit runs + except (AttributeError, ValueError): return True @@ -68,7 +76,7 @@ class AnsiToWin32(object): win32 function calls. ''' ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer - ANSI_OSC_RE = re.compile('\001?\033\\]((?:.|;)*?)(\x07)\002?') # Operating System Command + ANSI_OSC_RE = re.compile('\001?\033\\]([^\a]*)(\a)\002?') # Operating System Command def __init__(self, wrapped, convert=None, strip=None, autoreset=False): # The wrapped stream (normally sys.stdout or sys.stderr) @@ -86,15 +94,22 @@ def __init__(self, wrapped, convert=None, strip=None, autoreset=False): # (e.g. Cygwin Terminal). In this case it's up to the terminal # to support the ANSI codes. conversion_supported = on_windows and winapi_test() + try: + fd = wrapped.fileno() + except Exception: + fd = -1 + system_has_native_ansi = not on_windows or enable_vt_processing(fd) + have_tty = not self.stream.closed and self.stream.isatty() + need_conversion = conversion_supported and not system_has_native_ansi # should we strip ANSI sequences from our output? if strip is None: - strip = conversion_supported or (not self.stream.closed and not self.stream.isatty()) + strip = need_conversion or not have_tty self.strip = strip # should we should convert ANSI sequences into win32 calls? 
if convert is None: - convert = conversion_supported and not self.stream.closed and self.stream.isatty() + convert = need_conversion and have_tty self.convert = convert # dict of ansi codes to win32 functions and parameters @@ -247,11 +262,16 @@ def convert_osc(self, text): start, end = match.span() text = text[:start] + text[end:] paramstring, command = match.groups() - if command in '\x07': # \x07 = BEL - params = paramstring.split(";") - # 0 - change title and icon (we will only change title) - # 1 - change icon (we don't support this) - # 2 - change title - if params[0] in '02': - winterm.set_title(params[1]) + if command == BEL: + if paramstring.count(";") == 1: + params = paramstring.split(";") + # 0 - change title and icon (we will only change title) + # 1 - change icon (we don't support this) + # 2 - change title + if params[0] in '02': + winterm.set_title(params[1]) return text + + + def flush(self): + self.wrapped.flush() diff --git a/src/rez/vendor/colorama/initialise.py b/src/rez/vendor/colorama/initialise.py index 430d06687..d5fd4b71f 100644 --- a/src/rez/vendor/colorama/initialise.py +++ b/src/rez/vendor/colorama/initialise.py @@ -6,13 +6,27 @@ from .ansitowin32 import AnsiToWin32 -orig_stdout = None -orig_stderr = None +def _wipe_internal_state_for_tests(): + global orig_stdout, orig_stderr + orig_stdout = None + orig_stderr = None + + global wrapped_stdout, wrapped_stderr + wrapped_stdout = None + wrapped_stderr = None -wrapped_stdout = None -wrapped_stderr = None + global atexit_done + atexit_done = False + + global fixed_windows_console + fixed_windows_console = False -atexit_done = False + try: + # no-op if it wasn't registered + atexit.unregister(reset_all) + except AttributeError: + # python 2: no atexit.unregister. Oh well, we did our best. + pass def reset_all(): @@ -55,6 +69,29 @@ def deinit(): sys.stderr = orig_stderr +def just_fix_windows_console(): + global fixed_windows_console + + if sys.platform != "win32": + return + if fixed_windows_console: + return + if wrapped_stdout is not None or wrapped_stderr is not None: + # Someone already ran init() and it did stuff, so we won't second-guess them + return + + # On newer versions of Windows, AnsiToWin32.__init__ will implicitly enable the + # native ANSI support in the console as a side-effect. We only need to actually + # replace sys.stdout/stderr if we're in the old-style conversion mode. + new_stdout = AnsiToWin32(sys.stdout, convert=None, strip=None, autoreset=False) + if new_stdout.convert: + sys.stdout = new_stdout + new_stderr = AnsiToWin32(sys.stderr, convert=None, strip=None, autoreset=False) + if new_stderr.convert: + sys.stderr = new_stderr + + fixed_windows_console = True + @contextlib.contextmanager def colorama_text(*args, **kwargs): init(*args, **kwargs) @@ -78,3 +115,7 @@ def wrap_stream(stream, convert, strip, autoreset, wrap): if wrapper.should_wrap(): stream = wrapper.stream return stream + + +# Use this for initial setup as well, to reduce code duplication +_wipe_internal_state_for_tests() diff --git a/src/rez/vendor/colorama/tests/ansitowin32_test.py b/src/rez/vendor/colorama/tests/ansitowin32_test.py index 4801cf8e6..deeffdca5 100644 --- a/src/rez/vendor/colorama/tests/ansitowin32_test.py +++ b/src/rez/vendor/colorama/tests/ansitowin32_test.py @@ -1,10 +1,22 @@ # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. 
-from io import StringIO +from io import StringIO, TextIOWrapper from unittest import TestCase, main - -from rez.vendor.mock import MagicMock, Mock, patch +try: + from contextlib import ExitStack +except ImportError: + # python 2 + from contextlib2 import ExitStack + +try: + from rez.vendor.mock import MagicMock, Mock, patch +except ImportError: + try: + from unittest.mock import MagicMock, Mock, patch + except ImportError: + from mock import MagicMock, Mock, patch from ..ansitowin32 import AnsiToWin32, StreamWrapper +from ..win32 import ENABLE_VIRTUAL_TERMINAL_PROCESSING from .utils import osname @@ -37,6 +49,17 @@ def testProxyNoContextManager(self): with StreamWrapper(mockStream, mockConverter) as wrapper: wrapper.write('hello') + def test_closed_shouldnt_raise_on_closed_stream(self): + stream = StringIO() + stream.close() + wrapper = StreamWrapper(stream, None) + self.assertEqual(wrapper.closed, True) + + def test_closed_shouldnt_raise_on_detached_stream(self): + stream = TextIOWrapper(StringIO()) + stream.detach() + wrapper = StreamWrapper(stream, None) + self.assertEqual(wrapper.closed, True) class AnsiToWin32Test(TestCase): @@ -167,13 +190,19 @@ def test_reset_all_shouldnt_raise_on_closed_orig_stdout(self): def test_wrap_shouldnt_raise_on_closed_orig_stdout(self): stream = StringIO() stream.close() - converter = AnsiToWin32(stream) - self.assertFalse(converter.strip) + with \ + patch("colorama.ansitowin32.os.name", "nt"), \ + patch("colorama.ansitowin32.winapi_test", lambda: True): + converter = AnsiToWin32(stream) + self.assertTrue(converter.strip) self.assertFalse(converter.convert) def test_wrap_shouldnt_raise_on_missing_closed_attr(self): - converter = AnsiToWin32(object()) - self.assertFalse(converter.strip) + with \ + patch("colorama.ansitowin32.os.name", "nt"), \ + patch("colorama.ansitowin32.winapi_test", lambda: True): + converter = AnsiToWin32(object()) + self.assertTrue(converter.strip) self.assertFalse(converter.convert) def testExtractParams(self): @@ -203,6 +232,66 @@ def testCallWin32UsesLookup(self): [a[0][0] for a in listener.call_args_list], [33, 11, 22] ) + def test_osc_codes(self): + mockStdout = Mock() + stream = AnsiToWin32(mockStdout, convert=True) + with patch('colorama.ansitowin32.winterm') as winterm: + data = [ + '\033]0\x07', # missing arguments + '\033]0;foo\x08', # wrong OSC command + '\033]0;colorama_test_title\x07', # should work + '\033]1;colorama_test_title\x07', # wrong set command + '\033]2;colorama_test_title\x07', # should work + '\033]' + ';' * 64 + '\x08', # see issue #247 + ] + for code in data: + stream.write(code) + self.assertEqual(winterm.set_title.call_count, 2) + + def test_native_windows_ansi(self): + with ExitStack() as stack: + def p(a, b): + stack.enter_context(patch(a, b, create=True)) + # Pretend to be on Windows + p("colorama.ansitowin32.os.name", "nt") + p("colorama.ansitowin32.winapi_test", lambda: True) + p("colorama.win32.winapi_test", lambda: True) + p("colorama.winterm.win32.windll", "non-None") + p("colorama.winterm.get_osfhandle", lambda _: 1234) + + # Pretend that our mock stream has native ANSI support + p( + "colorama.winterm.win32.GetConsoleMode", + lambda _: ENABLE_VIRTUAL_TERMINAL_PROCESSING, + ) + SetConsoleMode = Mock() + p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode) + + stdout = Mock() + stdout.closed = False + stdout.isatty.return_value = True + stdout.fileno.return_value = 1 + + # Our fake console says it has native vt support, so AnsiToWin32 should + # enable that support and do nothing else. 
+ stream = AnsiToWin32(stdout) + SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING) + self.assertFalse(stream.strip) + self.assertFalse(stream.convert) + self.assertFalse(stream.should_wrap()) + + # Now let's pretend we're on an old Windows console, that doesn't have + # native ANSI support. + p("colorama.winterm.win32.GetConsoleMode", lambda _: 0) + SetConsoleMode = Mock() + p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode) + + stream = AnsiToWin32(stdout) + SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING) + self.assertTrue(stream.strip) + self.assertTrue(stream.convert) + self.assertTrue(stream.should_wrap()) + if __name__ == '__main__': main() diff --git a/src/rez/vendor/colorama/tests/initialise_test.py b/src/rez/vendor/colorama/tests/initialise_test.py index 04285bf83..6363edf16 100644 --- a/src/rez/vendor/colorama/tests/initialise_test.py +++ b/src/rez/vendor/colorama/tests/initialise_test.py @@ -1,13 +1,18 @@ # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. -import os import sys -from unittest import TestCase, main +from unittest import TestCase, main, skipUnless -from rez.vendor.mock import patch +try: + from rez.vendor.mock import patch, Mock +except ImportError: + try: + from unittest.mock import patch, Mock + except ImportError: + from mock import patch, Mock from ..ansitowin32 import StreamWrapper -from ..initialise import init -from .utils import osname, redirected_output, replace_by +from ..initialise import init, just_fix_windows_console, _wipe_internal_state_for_tests +from .utils import osname, replace_by orig_stdout = sys.stdout orig_stderr = sys.stderr @@ -15,11 +20,13 @@ class InitTest(TestCase): + @skipUnless(sys.stdout.isatty(), "sys.stdout is not a tty") def setUp(self): # sanity check self.assertNotWrapped() def tearDown(self): + _wipe_internal_state_for_tests() sys.stdout = orig_stdout sys.stderr = orig_stderr @@ -37,6 +44,7 @@ def assertNotWrapped(self): @patch('colorama.initialise.reset_all') @patch('colorama.ansitowin32.winapi_test', lambda *_: True) + @patch('colorama.ansitowin32.enable_vt_processing', lambda *_: False) def testInitWrapsOnWindows(self, _): with osname("nt"): init() @@ -75,14 +83,6 @@ def testInitWrapOffDoesntWrapOnWindows(self): def testInitWrapOffIncompatibleWithAutoresetOn(self): self.assertRaises(ValueError, lambda: init(autoreset=True, wrap=False)) - @patch('colorama.ansitowin32.winterm', None) - @patch('colorama.ansitowin32.winapi_test', lambda *_: True) - def testInitOnlyWrapsOnce(self): - with osname("nt"): - init() - init() - self.assertWrapped() - @patch('colorama.win32.SetConsoleTextAttribute') @patch('colorama.initialise.AnsiToWin32') def testAutoResetPassedOn(self, mockATW32, _): @@ -119,5 +119,74 @@ def testAtexitRegisteredOnlyOnce(self, mockRegister): self.assertFalse(mockRegister.called) +class JustFixWindowsConsoleTest(TestCase): + def _reset(self): + _wipe_internal_state_for_tests() + sys.stdout = orig_stdout + sys.stderr = orig_stderr + + def tearDown(self): + self._reset() + + @patch("colorama.ansitowin32.winapi_test", lambda: True) + def testJustFixWindowsConsole(self): + if sys.platform != "win32": + # just_fix_windows_console should be a no-op + just_fix_windows_console() + self.assertIs(sys.stdout, orig_stdout) + self.assertIs(sys.stderr, orig_stderr) + else: + def fake_std(): + # Emulate stdout=not a tty, stderr=tty + # to check that we handle both cases correctly + stdout = Mock() + stdout.closed = False + stdout.isatty.return_value = 
False + stdout.fileno.return_value = 1 + sys.stdout = stdout + + stderr = Mock() + stderr.closed = False + stderr.isatty.return_value = True + stderr.fileno.return_value = 2 + sys.stderr = stderr + + for native_ansi in [False, True]: + with patch( + 'colorama.ansitowin32.enable_vt_processing', + lambda *_: native_ansi + ): + self._reset() + fake_std() + + # Regular single-call test + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(sys.stdout, prev_stdout) + if native_ansi: + self.assertIs(sys.stderr, prev_stderr) + else: + self.assertIsNot(sys.stderr, prev_stderr) + + # second call without resetting is always a no-op + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(sys.stdout, prev_stdout) + self.assertIs(sys.stderr, prev_stderr) + + self._reset() + fake_std() + + # If init() runs first, just_fix_windows_console should be a no-op + init() + prev_stdout = sys.stdout + prev_stderr = sys.stderr + just_fix_windows_console() + self.assertIs(prev_stdout, sys.stdout) + self.assertIs(prev_stderr, sys.stderr) + + if __name__ == '__main__': main() diff --git a/src/rez/vendor/colorama/tests/utils.py b/src/rez/vendor/colorama/tests/utils.py index c201c561b..472fafb44 100644 --- a/src/rez/vendor/colorama/tests/utils.py +++ b/src/rez/vendor/colorama/tests/utils.py @@ -4,7 +4,6 @@ import sys import os -from rez.vendor.mock import Mock class StreamTTY(StringIO): def isatty(self): @@ -21,14 +20,6 @@ def osname(name): yield os.name = orig -@contextmanager -def redirected_output(): - orig = sys.stdout - sys.stdout = Mock() - sys.stdout.isatty = lambda: False - yield - sys.stdout = orig - @contextmanager def replace_by(stream): orig_stdout = sys.stdout diff --git a/src/rez/vendor/colorama/tests/winterm_test.py b/src/rez/vendor/colorama/tests/winterm_test.py index c83b3a82b..388538ca3 100644 --- a/src/rez/vendor/colorama/tests/winterm_test.py +++ b/src/rez/vendor/colorama/tests/winterm_test.py @@ -2,7 +2,13 @@ import sys from unittest import TestCase, main, skipUnless -from rez.vendor.mock import Mock, patch +try: + from rez.vendor.mock import Mock, patch +except ImportError: + try: + from unittest.mock import Mock, patch + except ImportError: + from mock import Mock, patch from ..winterm import WinColor, WinStyle, WinTerm diff --git a/src/rez/vendor/colorama/win32.py b/src/rez/vendor/colorama/win32.py index c2d836033..841b0e270 100644 --- a/src/rez/vendor/colorama/win32.py +++ b/src/rez/vendor/colorama/win32.py @@ -4,6 +4,8 @@ STDOUT = -11 STDERR = -12 +ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + try: import ctypes from ctypes import LibraryLoader @@ -89,6 +91,20 @@ def __str__(self): ] _SetConsoleTitleW.restype = wintypes.BOOL + _GetConsoleMode = windll.kernel32.GetConsoleMode + _GetConsoleMode.argtypes = [ + wintypes.HANDLE, + POINTER(wintypes.DWORD) + ] + _GetConsoleMode.restype = wintypes.BOOL + + _SetConsoleMode = windll.kernel32.SetConsoleMode + _SetConsoleMode.argtypes = [ + wintypes.HANDLE, + wintypes.DWORD + ] + _SetConsoleMode.restype = wintypes.BOOL + def _winapi_test(handle): csbi = CONSOLE_SCREEN_BUFFER_INFO() success = _GetConsoleScreenBufferInfo( @@ -150,3 +166,15 @@ def FillConsoleOutputAttribute(stream_id, attr, length, start): def SetConsoleTitle(title): return _SetConsoleTitleW(title) + + def GetConsoleMode(handle): + mode = wintypes.DWORD() + success = _GetConsoleMode(handle, byref(mode)) + if not success: + raise ctypes.WinError() + return mode.value + + def SetConsoleMode(handle, mode): + 
success = _SetConsoleMode(handle, mode) + if not success: + raise ctypes.WinError() diff --git a/src/rez/vendor/colorama/winterm.py b/src/rez/vendor/colorama/winterm.py index 0fdb4ec4e..aad867e8c 100644 --- a/src/rez/vendor/colorama/winterm.py +++ b/src/rez/vendor/colorama/winterm.py @@ -1,6 +1,12 @@ # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. -from . import win32 +try: + from msvcrt import get_osfhandle +except ImportError: + def get_osfhandle(_): + raise OSError("This isn't windows!") + +from . import win32 # from wincon.h class WinColor(object): @@ -167,3 +173,23 @@ def erase_line(self, mode=0, on_stderr=False): def set_title(self, title): win32.SetConsoleTitle(title) + + +def enable_vt_processing(fd): + if win32.windll is None or not win32.winapi_test(): + return False + + try: + handle = get_osfhandle(fd) + mode = win32.GetConsoleMode(handle) + win32.SetConsoleMode( + handle, + mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING, + ) + + mode = win32.GetConsoleMode(handle) + if mode & win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING: + return True + # Can get TypeError in testsuite where 'fd' is a Mock() + except (OSError, TypeError): + return False diff --git a/src/rez/vendor/distlib/LICENSE b/src/rez/vendor/distlib/LICENSE.txt similarity index 99% rename from src/rez/vendor/distlib/LICENSE rename to src/rez/vendor/distlib/LICENSE.txt index 9e9786b52..c31ac56d7 100644 --- a/src/rez/vendor/distlib/LICENSE +++ b/src/rez/vendor/distlib/LICENSE.txt @@ -282,5 +282,3 @@ FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - - diff --git a/src/rez/vendor/distlib/__init__.py b/src/rez/vendor/distlib/__init__.py index a2d70d475..bf0d6c6d3 100644 --- a/src/rez/vendor/distlib/__init__.py +++ b/src/rez/vendor/distlib/__init__.py @@ -1,23 +1,33 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2012-2019 Vinay Sajip. +# Copyright (C) 2012-2023 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # import logging -__version__ = '0.2.9.post0' +__version__ = '0.3.9' + class DistlibException(Exception): pass + try: from logging import NullHandler -except ImportError: # pragma: no cover +except ImportError: # pragma: no cover + class NullHandler(logging.Handler): - def handle(self, record): pass - def emit(self, record): pass - def createLock(self): self.lock = None + + def handle(self, record): + pass + + def emit(self, record): + pass + + def createLock(self): + self.lock = None + logger = logging.getLogger(__name__) logger.addHandler(NullHandler()) diff --git a/src/rez/vendor/distlib/_backport/__init__.py b/src/rez/vendor/distlib/_backport/__init__.py deleted file mode 100644 index f7dbf4c9a..000000000 --- a/src/rez/vendor/distlib/_backport/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -"""Modules copied from Python 3 standard libraries, for internal use only. - -Individual classes and functions are found in d2._backport.misc. Intended -usage is to always import things missing from 3.1 from that module: the -built-in/stdlib objects will be used if found. 
-""" diff --git a/src/rez/vendor/distlib/_backport/misc.py b/src/rez/vendor/distlib/_backport/misc.py deleted file mode 100644 index cfb318d34..000000000 --- a/src/rez/vendor/distlib/_backport/misc.py +++ /dev/null @@ -1,41 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -"""Backports for individual classes and functions.""" - -import os -import sys - -__all__ = ['cache_from_source', 'callable', 'fsencode'] - - -try: - from imp import cache_from_source -except ImportError: - def cache_from_source(py_file, debug=__debug__): - ext = debug and 'c' or 'o' - return py_file + ext - - -try: - callable = callable -except NameError: - from collections import Callable - - def callable(obj): - return isinstance(obj, Callable) - - -try: - fsencode = os.fsencode -except AttributeError: - def fsencode(filename): - if isinstance(filename, bytes): - return filename - elif isinstance(filename, str): - return filename.encode(sys.getfilesystemencoding()) - else: - raise TypeError("expect bytes or str, not %s" % - type(filename).__name__) diff --git a/src/rez/vendor/distlib/_backport/shutil.py b/src/rez/vendor/distlib/_backport/shutil.py deleted file mode 100644 index 159e49ee8..000000000 --- a/src/rez/vendor/distlib/_backport/shutil.py +++ /dev/null @@ -1,761 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. -# -"""Utility functions for copying and archiving files and directory trees. - -XXX The functions here don't copy the resource fork or other metadata on Mac. - -""" - -import os -import sys -import stat -from os.path import abspath -import fnmatch -import collections -import errno -from . import tarfile - -try: - import bz2 - _BZ2_SUPPORTED = True -except ImportError: - _BZ2_SUPPORTED = False - -try: - from pwd import getpwnam -except ImportError: - getpwnam = None - -try: - from grp import getgrnam -except ImportError: - getgrnam = None - -__all__ = ["copyfileobj", "copyfile", "copymode", "copystat", "copy", "copy2", - "copytree", "move", "rmtree", "Error", "SpecialFileError", - "ExecError", "make_archive", "get_archive_formats", - "register_archive_format", "unregister_archive_format", - "get_unpack_formats", "register_unpack_format", - "unregister_unpack_format", "unpack_archive", "ignore_patterns"] - -class Error(EnvironmentError): - pass - -class SpecialFileError(EnvironmentError): - """Raised when trying to do a kind of operation (e.g. copying) which is - not supported on a special file (e.g. a named pipe)""" - -class ExecError(EnvironmentError): - """Raised when a command could not be executed""" - -class ReadError(EnvironmentError): - """Raised when an archive cannot be read""" - -class RegistryError(Exception): - """Raised when a registry operation with the archiving - and unpacking registries fails""" - - -try: - WindowsError -except NameError: - WindowsError = None - -def copyfileobj(fsrc, fdst, length=16*1024): - """copy data from file-like object fsrc to file-like object fdst""" - while 1: - buf = fsrc.read(length) - if not buf: - break - fdst.write(buf) - -def _samefile(src, dst): - # Macintosh, Unix. - if hasattr(os.path, 'samefile'): - try: - return os.path.samefile(src, dst) - except OSError: - return False - - # All other platforms: check for same pathname. 
- return (os.path.normcase(os.path.abspath(src)) == - os.path.normcase(os.path.abspath(dst))) - -def copyfile(src, dst): - """Copy data from src to dst""" - if _samefile(src, dst): - raise Error("`%s` and `%s` are the same file" % (src, dst)) - - for fn in [src, dst]: - try: - st = os.stat(fn) - except OSError: - # File most likely does not exist - pass - else: - # XXX What about other special files? (sockets, devices...) - if stat.S_ISFIFO(st.st_mode): - raise SpecialFileError("`%s` is a named pipe" % fn) - - with open(src, 'rb') as fsrc: - with open(dst, 'wb') as fdst: - copyfileobj(fsrc, fdst) - -def copymode(src, dst): - """Copy mode bits from src to dst""" - if hasattr(os, 'chmod'): - st = os.stat(src) - mode = stat.S_IMODE(st.st_mode) - os.chmod(dst, mode) - -def copystat(src, dst): - """Copy all stat info (mode bits, atime, mtime, flags) from src to dst""" - st = os.stat(src) - mode = stat.S_IMODE(st.st_mode) - if hasattr(os, 'utime'): - os.utime(dst, (st.st_atime, st.st_mtime)) - if hasattr(os, 'chmod'): - os.chmod(dst, mode) - if hasattr(os, 'chflags') and hasattr(st, 'st_flags'): - try: - os.chflags(dst, st.st_flags) - except OSError as why: - if (not hasattr(errno, 'EOPNOTSUPP') or - why.errno != errno.EOPNOTSUPP): - raise - -def copy(src, dst): - """Copy data and mode bits ("cp src dst"). - - The destination may be a directory. - - """ - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - copyfile(src, dst) - copymode(src, dst) - -def copy2(src, dst): - """Copy data and all stat info ("cp -p src dst"). - - The destination may be a directory. - - """ - if os.path.isdir(dst): - dst = os.path.join(dst, os.path.basename(src)) - copyfile(src, dst) - copystat(src, dst) - -def ignore_patterns(*patterns): - """Function that can be used as copytree() ignore parameter. - - Patterns is a sequence of glob-style patterns - that are used to exclude files""" - def _ignore_patterns(path, names): - ignored_names = [] - for pattern in patterns: - ignored_names.extend(fnmatch.filter(names, pattern)) - return set(ignored_names) - return _ignore_patterns - -def copytree(src, dst, symlinks=False, ignore=None, copy_function=copy2, - ignore_dangling_symlinks=False): - """Recursively copy a directory tree. - - The destination directory must not already exist. - If exception(s) occur, an Error is raised with a list of reasons. - - If the optional symlinks flag is true, symbolic links in the - source tree result in symbolic links in the destination tree; if - it is false, the contents of the files pointed to by symbolic - links are copied. If the file pointed by the symlink doesn't - exist, an exception will be added in the list of errors raised in - an Error exception at the end of the copy process. - - You can set the optional ignore_dangling_symlinks flag to true if you - want to silence this exception. Notice that this has no effect on - platforms that don't support os.symlink. - - The optional ignore argument is a callable. If given, it - is called with the `src` parameter, which is the directory - being visited by copytree(), and `names` which is the list of - `src` contents, as returned by os.listdir(): - - callable(src, names) -> ignored_names - - Since copytree() is called recursively, the callable will be - called once for each directory that is copied. It returns a - list of names relative to the `src` directory that should - not be copied. - - The optional copy_function argument is a callable that will be used - to copy each file. 
It will be called with the source path and the - destination path as arguments. By default, copy2() is used, but any - function that supports the same signature (like copy()) can be used. - - """ - names = os.listdir(src) - if ignore is not None: - ignored_names = ignore(src, names) - else: - ignored_names = set() - - os.makedirs(dst) - errors = [] - for name in names: - if name in ignored_names: - continue - srcname = os.path.join(src, name) - dstname = os.path.join(dst, name) - try: - if os.path.islink(srcname): - linkto = os.readlink(srcname) - if symlinks: - os.symlink(linkto, dstname) - else: - # ignore dangling symlink if the flag is on - if not os.path.exists(linkto) and ignore_dangling_symlinks: - continue - # otherwise let the copy occurs. copy2 will raise an error - copy_function(srcname, dstname) - elif os.path.isdir(srcname): - copytree(srcname, dstname, symlinks, ignore, copy_function) - else: - # Will raise a SpecialFileError for unsupported file types - copy_function(srcname, dstname) - # catch the Error from the recursive copytree so that we can - # continue with other files - except Error as err: - errors.extend(err.args[0]) - except EnvironmentError as why: - errors.append((srcname, dstname, str(why))) - try: - copystat(src, dst) - except OSError as why: - if WindowsError is not None and isinstance(why, WindowsError): - # Copying file access times may fail on Windows - pass - else: - errors.extend((src, dst, str(why))) - if errors: - raise Error(errors) - -def rmtree(path, ignore_errors=False, onerror=None): - """Recursively delete a directory tree. - - If ignore_errors is set, errors are ignored; otherwise, if onerror - is set, it is called to handle the error with arguments (func, - path, exc_info) where func is os.listdir, os.remove, or os.rmdir; - path is the argument to that function that caused it to fail; and - exc_info is a tuple returned by sys.exc_info(). If ignore_errors - is false and onerror is None, an exception is raised. - - """ - if ignore_errors: - def onerror(*args): - pass - elif onerror is None: - def onerror(*args): - raise - try: - if os.path.islink(path): - # symlinks to directories are forbidden, see bug #1669 - raise OSError("Cannot call rmtree on a symbolic link") - except OSError: - onerror(os.path.islink, path, sys.exc_info()) - # can't continue even if onerror hook returns - return - names = [] - try: - names = os.listdir(path) - except os.error: - onerror(os.listdir, path, sys.exc_info()) - for name in names: - fullname = os.path.join(path, name) - try: - mode = os.lstat(fullname).st_mode - except os.error: - mode = 0 - if stat.S_ISDIR(mode): - rmtree(fullname, ignore_errors, onerror) - else: - try: - os.remove(fullname) - except os.error: - onerror(os.remove, fullname, sys.exc_info()) - try: - os.rmdir(path) - except os.error: - onerror(os.rmdir, path, sys.exc_info()) - - -def _basename(path): - # A basename() variant which first strips the trailing slash, if present. - # Thus we always get the last component of the path, even for directories. - return os.path.basename(path.rstrip(os.path.sep)) - -def move(src, dst): - """Recursively move a file or directory to another location. This is - similar to the Unix "mv" command. - - If the destination is a directory or a symlink to a directory, the source - is moved inside the directory. The destination path must not already - exist. - - If the destination already exists but is not a directory, it may be - overwritten depending on os.rename() semantics. 
- - If the destination is on our current filesystem, then rename() is used. - Otherwise, src is copied to the destination and then removed. - A lot more could be done here... A look at a mv.c shows a lot of - the issues this implementation glosses over. - - """ - real_dst = dst - if os.path.isdir(dst): - if _samefile(src, dst): - # We might be on a case insensitive filesystem, - # perform the rename anyway. - os.rename(src, dst) - return - - real_dst = os.path.join(dst, _basename(src)) - if os.path.exists(real_dst): - raise Error("Destination path '%s' already exists" % real_dst) - try: - os.rename(src, real_dst) - except OSError: - if os.path.isdir(src): - if _destinsrc(src, dst): - raise Error("Cannot move a directory '%s' into itself '%s'." % (src, dst)) - copytree(src, real_dst, symlinks=True) - rmtree(src) - else: - copy2(src, real_dst) - os.unlink(src) - -def _destinsrc(src, dst): - src = abspath(src) - dst = abspath(dst) - if not src.endswith(os.path.sep): - src += os.path.sep - if not dst.endswith(os.path.sep): - dst += os.path.sep - return dst.startswith(src) - -def _get_gid(name): - """Returns a gid, given a group name.""" - if getgrnam is None or name is None: - return None - try: - result = getgrnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def _get_uid(name): - """Returns an uid, given a user name.""" - if getpwnam is None or name is None: - return None - try: - result = getpwnam(name) - except KeyError: - result = None - if result is not None: - return result[2] - return None - -def _make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0, - owner=None, group=None, logger=None): - """Create a (possibly compressed) tar file from all the files under - 'base_dir'. - - 'compress' must be "gzip" (the default), "bzip2", or None. - - 'owner' and 'group' can be used to define an owner and a group for the - archive that is being built. If not provided, the current owner and group - will be used. - - The output tar file will be named 'base_name' + ".tar", possibly plus - the appropriate compression extension (".gz", or ".bz2"). - - Returns the output filename. 
- """ - tar_compression = {'gzip': 'gz', None: ''} - compress_ext = {'gzip': '.gz'} - - if _BZ2_SUPPORTED: - tar_compression['bzip2'] = 'bz2' - compress_ext['bzip2'] = '.bz2' - - # flags for compression program, each element of list will be an argument - if compress is not None and compress not in compress_ext: - raise ValueError("bad value for 'compress', or compression format not " - "supported : {0}".format(compress)) - - archive_name = base_name + '.tar' + compress_ext.get(compress, '') - archive_dir = os.path.dirname(archive_name) - - if not os.path.exists(archive_dir): - if logger is not None: - logger.info("creating %s", archive_dir) - if not dry_run: - os.makedirs(archive_dir) - - # creating the tarball - if logger is not None: - logger.info('Creating tar archive') - - uid = _get_uid(owner) - gid = _get_gid(group) - - def _set_uid_gid(tarinfo): - if gid is not None: - tarinfo.gid = gid - tarinfo.gname = group - if uid is not None: - tarinfo.uid = uid - tarinfo.uname = owner - return tarinfo - - if not dry_run: - tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress]) - try: - tar.add(base_dir, filter=_set_uid_gid) - finally: - tar.close() - - return archive_name - -def _call_external_zip(base_dir, zip_filename, verbose=False, dry_run=False): - # XXX see if we want to keep an external call here - if verbose: - zipoptions = "-r" - else: - zipoptions = "-rq" - from distutils.errors import DistutilsExecError - from distutils.spawn import spawn - try: - spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run) - except DistutilsExecError: - # XXX really should distinguish between "couldn't find - # external 'zip' command" and "zip failed". - raise ExecError("unable to create zip file '%s': " - "could neither import the 'zipfile' module nor " - "find a standalone zip utility") % zip_filename - -def _make_zipfile(base_name, base_dir, verbose=0, dry_run=0, logger=None): - """Create a zip file from all the files under 'base_dir'. - - The output zip file will be named 'base_name' + ".zip". Uses either the - "zipfile" Python module (if available) or the InfoZIP "zip" utility - (if installed and found on the default search path). If neither tool is - available, raises ExecError. Returns the name of the output zip - file. - """ - zip_filename = base_name + ".zip" - archive_dir = os.path.dirname(base_name) - - if not os.path.exists(archive_dir): - if logger is not None: - logger.info("creating %s", archive_dir) - if not dry_run: - os.makedirs(archive_dir) - - # If zipfile module is not available, try spawning an external 'zip' - # command. 
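
On every interpreter rez supports, the zipfile import below always succeeds,
so the external-zip branch is effectively dead code; what the deleted helper
actually does boils down to this sketch (with "base_dir" and "out.zip" as
placeholder paths):

    import os
    import zipfile

    # Deflate every regular file under base_dir into out.zip, mirroring
    # the zipfile branch of the helper being removed here. ZIP_DEFLATED
    # assumes zlib, which every normal CPython build ships.
    with zipfile.ZipFile("out.zip", "w", compression=zipfile.ZIP_DEFLATED) as zf:
        for dirpath, _dirnames, filenames in os.walk("base_dir"):
            for name in filenames:
                path = os.path.normpath(os.path.join(dirpath, name))
                if os.path.isfile(path):
                    zf.write(path, path)
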
- try: - import zipfile - except ImportError: - zipfile = None - - if zipfile is None: - _call_external_zip(base_dir, zip_filename, verbose, dry_run) - else: - if logger is not None: - logger.info("creating '%s' and adding '%s' to it", - zip_filename, base_dir) - - if not dry_run: - zip = zipfile.ZipFile(zip_filename, "w", - compression=zipfile.ZIP_DEFLATED) - - for dirpath, dirnames, filenames in os.walk(base_dir): - for name in filenames: - path = os.path.normpath(os.path.join(dirpath, name)) - if os.path.isfile(path): - zip.write(path, path) - if logger is not None: - logger.info("adding '%s'", path) - zip.close() - - return zip_filename - -_ARCHIVE_FORMATS = { - 'gztar': (_make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"), - 'bztar': (_make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"), - 'tar': (_make_tarball, [('compress', None)], "uncompressed tar file"), - 'zip': (_make_zipfile, [], "ZIP file"), - } - -if _BZ2_SUPPORTED: - _ARCHIVE_FORMATS['bztar'] = (_make_tarball, [('compress', 'bzip2')], - "bzip2'ed tar-file") - -def get_archive_formats(): - """Returns a list of supported formats for archiving and unarchiving. - - Each element of the returned sequence is a tuple (name, description) - """ - formats = [(name, registry[2]) for name, registry in - _ARCHIVE_FORMATS.items()] - formats.sort() - return formats - -def register_archive_format(name, function, extra_args=None, description=''): - """Registers an archive format. - - name is the name of the format. function is the callable that will be - used to create archives. If provided, extra_args is a sequence of - (name, value) tuples that will be passed as arguments to the callable. - description can be provided to describe the format, and will be returned - by the get_archive_formats() function. - """ - if extra_args is None: - extra_args = [] - if not isinstance(function, collections.Callable): - raise TypeError('The %s object is not callable' % function) - if not isinstance(extra_args, (tuple, list)): - raise TypeError('extra_args needs to be a sequence') - for element in extra_args: - if not isinstance(element, (tuple, list)) or len(element) !=2: - raise TypeError('extra_args elements are : (arg_name, value)') - - _ARCHIVE_FORMATS[name] = (function, extra_args, description) - -def unregister_archive_format(name): - del _ARCHIVE_FORMATS[name] - -def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0, - dry_run=0, owner=None, group=None, logger=None): - """Create an archive file (eg. zip or tar). - - 'base_name' is the name of the file to create, minus any format-specific - extension; 'format' is the archive format: one of "zip", "tar", "bztar" - or "gztar". - - 'root_dir' is a directory that will be the root directory of the - archive; ie. we typically chdir into 'root_dir' before creating the - archive. 'base_dir' is the directory where we start archiving from; - ie. 'base_dir' will be the common prefix of all files and - directories in the archive. 'root_dir' and 'base_dir' both default - to the current directory. Returns the name of the archive file. - - 'owner' and 'group' are used when creating a tar archive. By default, - uses the current owner and group. 
- """ - save_cwd = os.getcwd() - if root_dir is not None: - if logger is not None: - logger.debug("changing into '%s'", root_dir) - base_name = os.path.abspath(base_name) - if not dry_run: - os.chdir(root_dir) - - if base_dir is None: - base_dir = os.curdir - - kwargs = {'dry_run': dry_run, 'logger': logger} - - try: - format_info = _ARCHIVE_FORMATS[format] - except KeyError: - raise ValueError("unknown archive format '%s'" % format) - - func = format_info[0] - for arg, val in format_info[1]: - kwargs[arg] = val - - if format != 'zip': - kwargs['owner'] = owner - kwargs['group'] = group - - try: - filename = func(base_name, base_dir, **kwargs) - finally: - if root_dir is not None: - if logger is not None: - logger.debug("changing back to '%s'", save_cwd) - os.chdir(save_cwd) - - return filename - - -def get_unpack_formats(): - """Returns a list of supported formats for unpacking. - - Each element of the returned sequence is a tuple - (name, extensions, description) - """ - formats = [(name, info[0], info[3]) for name, info in - _UNPACK_FORMATS.items()] - formats.sort() - return formats - -def _check_unpack_options(extensions, function, extra_args): - """Checks what gets registered as an unpacker.""" - # first make sure no other unpacker is registered for this extension - existing_extensions = {} - for name, info in _UNPACK_FORMATS.items(): - for ext in info[0]: - existing_extensions[ext] = name - - for extension in extensions: - if extension in existing_extensions: - msg = '%s is already registered for "%s"' - raise RegistryError(msg % (extension, - existing_extensions[extension])) - - if not isinstance(function, collections.Callable): - raise TypeError('The registered function must be a callable') - - -def register_unpack_format(name, extensions, function, extra_args=None, - description=''): - """Registers an unpack format. - - `name` is the name of the format. `extensions` is a list of extensions - corresponding to the format. - - `function` is the callable that will be - used to unpack archives. The callable will receive archives to unpack. - If it's unable to handle an archive, it needs to raise a ReadError - exception. - - If provided, `extra_args` is a sequence of - (name, value) tuples that will be passed as arguments to the callable. - description can be provided to describe the format, and will be returned - by the get_unpack_formats() function. - """ - if extra_args is None: - extra_args = [] - _check_unpack_options(extensions, function, extra_args) - _UNPACK_FORMATS[name] = extensions, function, extra_args, description - -def unregister_unpack_format(name): - """Removes the pack format from the registry.""" - del _UNPACK_FORMATS[name] - -def _ensure_directory(path): - """Ensure that the parent directory of `path` exists""" - dirname = os.path.dirname(path) - if not os.path.isdir(dirname): - os.makedirs(dirname) - -def _unpack_zipfile(filename, extract_dir): - """Unpack zip `filename` to `extract_dir` - """ - try: - import zipfile - except ImportError: - raise ReadError('zlib not supported, cannot unpack this archive.') - - if not zipfile.is_zipfile(filename): - raise ReadError("%s is not a zip file" % filename) - - zip = zipfile.ZipFile(filename) - try: - for info in zip.infolist(): - name = info.filename - - # don't extract absolute paths or ones with .. in them - if name.startswith('/') or '..' 
in name: - continue - - target = os.path.join(extract_dir, *name.split('/')) - if not target: - continue - - _ensure_directory(target) - if not name.endswith('/'): - # file - data = zip.read(info.filename) - f = open(target, 'wb') - try: - f.write(data) - finally: - f.close() - del data - finally: - zip.close() - -def _unpack_tarfile(filename, extract_dir): - """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` - """ - try: - tarobj = tarfile.open(filename) - except tarfile.TarError: - raise ReadError( - "%s is not a compressed or uncompressed tar file" % filename) - try: - tarobj.extractall(extract_dir) - finally: - tarobj.close() - -_UNPACK_FORMATS = { - 'gztar': (['.tar.gz', '.tgz'], _unpack_tarfile, [], "gzip'ed tar-file"), - 'tar': (['.tar'], _unpack_tarfile, [], "uncompressed tar file"), - 'zip': (['.zip'], _unpack_zipfile, [], "ZIP file") - } - -if _BZ2_SUPPORTED: - _UNPACK_FORMATS['bztar'] = (['.bz2'], _unpack_tarfile, [], - "bzip2'ed tar-file") - -def _find_unpack_format(filename): - for name, info in _UNPACK_FORMATS.items(): - for extension in info[0]: - if filename.endswith(extension): - return name - return None - -def unpack_archive(filename, extract_dir=None, format=None): - """Unpack an archive. - - `filename` is the name of the archive. - - `extract_dir` is the name of the target directory, where the archive - is unpacked. If not provided, the current working directory is used. - - `format` is the archive format: one of "zip", "tar", or "gztar". Or any - other registered format. If not provided, unpack_archive will use the - filename extension and see if an unpacker was registered for that - extension. - - In case none is found, a ValueError is raised. - """ - if extract_dir is None: - extract_dir = os.getcwd() - - if format is not None: - try: - format_info = _UNPACK_FORMATS[format] - except KeyError: - raise ValueError("Unknown unpack format '{0}'".format(format)) - - func = format_info[1] - func(filename, extract_dir, **dict(format_info[2])) - else: - # we need to look at the registered unpackers supported extensions - format = _find_unpack_format(filename) - if format is None: - raise ReadError("Unknown archive format '{0}'".format(filename)) - - func = _UNPACK_FORMATS[format][1] - kwargs = dict(_UNPACK_FORMATS[format][2]) - func(filename, extract_dir, **kwargs) diff --git a/src/rez/vendor/distlib/_backport/sysconfig.cfg b/src/rez/vendor/distlib/_backport/sysconfig.cfg deleted file mode 100644 index 1746bd01c..000000000 --- a/src/rez/vendor/distlib/_backport/sysconfig.cfg +++ /dev/null @@ -1,84 +0,0 @@ -[posix_prefix] -# Configuration directories. Some of these come straight out of the -# configure script. They are for implementing the other variables, not to -# be used directly in [resource_locations]. 
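
The install-scheme tables that follow have shipped in the stdlib sysconfig
module since Python 3.2, so deleting this copy loses nothing; the live
values are one call away:

    import sysconfig

    # Stdlib replacement for the deleted backport's scheme definitions.
    print(sysconfig.get_scheme_names())
    print(sysconfig.get_paths())   # install paths for the default scheme
    print(sysconfig.get_config_var("py_version_short"))
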
-confdir = /etc -datadir = /usr/share -libdir = /usr/lib -statedir = /var -# User resource directory -local = ~/.local/{distribution.name} - -stdlib = {base}/lib/python{py_version_short} -platstdlib = {platbase}/lib/python{py_version_short} -purelib = {base}/lib/python{py_version_short}/site-packages -platlib = {platbase}/lib/python{py_version_short}/site-packages -include = {base}/include/python{py_version_short}{abiflags} -platinclude = {platbase}/include/python{py_version_short}{abiflags} -data = {base} - -[posix_home] -stdlib = {base}/lib/python -platstdlib = {base}/lib/python -purelib = {base}/lib/python -platlib = {base}/lib/python -include = {base}/include/python -platinclude = {base}/include/python -scripts = {base}/bin -data = {base} - -[nt] -stdlib = {base}/Lib -platstdlib = {base}/Lib -purelib = {base}/Lib/site-packages -platlib = {base}/Lib/site-packages -include = {base}/Include -platinclude = {base}/Include -scripts = {base}/Scripts -data = {base} - -[os2] -stdlib = {base}/Lib -platstdlib = {base}/Lib -purelib = {base}/Lib/site-packages -platlib = {base}/Lib/site-packages -include = {base}/Include -platinclude = {base}/Include -scripts = {base}/Scripts -data = {base} - -[os2_home] -stdlib = {userbase}/lib/python{py_version_short} -platstdlib = {userbase}/lib/python{py_version_short} -purelib = {userbase}/lib/python{py_version_short}/site-packages -platlib = {userbase}/lib/python{py_version_short}/site-packages -include = {userbase}/include/python{py_version_short} -scripts = {userbase}/bin -data = {userbase} - -[nt_user] -stdlib = {userbase}/Python{py_version_nodot} -platstdlib = {userbase}/Python{py_version_nodot} -purelib = {userbase}/Python{py_version_nodot}/site-packages -platlib = {userbase}/Python{py_version_nodot}/site-packages -include = {userbase}/Python{py_version_nodot}/Include -scripts = {userbase}/Scripts -data = {userbase} - -[posix_user] -stdlib = {userbase}/lib/python{py_version_short} -platstdlib = {userbase}/lib/python{py_version_short} -purelib = {userbase}/lib/python{py_version_short}/site-packages -platlib = {userbase}/lib/python{py_version_short}/site-packages -include = {userbase}/include/python{py_version_short} -scripts = {userbase}/bin -data = {userbase} - -[osx_framework_user] -stdlib = {userbase}/lib/python -platstdlib = {userbase}/lib/python -purelib = {userbase}/lib/python/site-packages -platlib = {userbase}/lib/python/site-packages -include = {userbase}/include -scripts = {userbase}/bin -data = {userbase} diff --git a/src/rez/vendor/distlib/_backport/sysconfig.py b/src/rez/vendor/distlib/_backport/sysconfig.py deleted file mode 100644 index 1df3aba14..000000000 --- a/src/rez/vendor/distlib/_backport/sysconfig.py +++ /dev/null @@ -1,788 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright (C) 2012 The Python Software Foundation. -# See LICENSE.txt and CONTRIBUTORS.txt. 
-# -"""Access to Python's configuration information.""" - -import codecs -import os -import re -import sys -from os.path import pardir, realpath -try: - import configparser -except ImportError: - import ConfigParser as configparser - - -__all__ = [ - 'get_config_h_filename', - 'get_config_var', - 'get_config_vars', - 'get_makefile_filename', - 'get_path', - 'get_path_names', - 'get_paths', - 'get_platform', - 'get_python_version', - 'get_scheme_names', - 'parse_config_h', -] - - -def _safe_realpath(path): - try: - return realpath(path) - except OSError: - return path - - -if sys.executable: - _PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable)) -else: - # sys.executable can be empty if argv[0] has been changed and Python is - # unable to retrieve the real program name - _PROJECT_BASE = _safe_realpath(os.getcwd()) - -if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower(): - _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir)) -# PC/VS7.1 -if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower(): - _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) -# PC/AMD64 -if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower(): - _PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir)) - - -def is_python_build(): - for fn in ("Setup.dist", "Setup.local"): - if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)): - return True - return False - -_PYTHON_BUILD = is_python_build() - -_cfg_read = False - -def _ensure_cfg_read(): - global _cfg_read - if not _cfg_read: - from ..resources import finder - backport_package = __name__.rsplit('.', 1)[0] - _finder = finder(backport_package) - _cfgfile = _finder.find('sysconfig.cfg') - assert _cfgfile, 'sysconfig.cfg exists' - with _cfgfile.as_stream() as s: - _SCHEMES.readfp(s) - if _PYTHON_BUILD: - for scheme in ('posix_prefix', 'posix_home'): - _SCHEMES.set(scheme, 'include', '{srcdir}/Include') - _SCHEMES.set(scheme, 'platinclude', '{projectbase}/.') - - _cfg_read = True - - -_SCHEMES = configparser.RawConfigParser() -_VAR_REPL = re.compile(r'\{([^{]*?)\}') - -def _expand_globals(config): - _ensure_cfg_read() - if config.has_section('globals'): - globals = config.items('globals') - else: - globals = tuple() - - sections = config.sections() - for section in sections: - if section == 'globals': - continue - for option, value in globals: - if config.has_option(section, option): - continue - config.set(section, option, value) - config.remove_section('globals') - - # now expanding local variables defined in the cfg file - # - for section in config.sections(): - variables = dict(config.items(section)) - - def _replacer(matchobj): - name = matchobj.group(1) - if name in variables: - return variables[name] - return matchobj.group(0) - - for option, value in config.items(section): - config.set(section, option, _VAR_REPL.sub(_replacer, value)) - -#_expand_globals(_SCHEMES) - - # FIXME don't rely on sys.version here, its format is an implementation detail - # of CPython, use sys.version_info or sys.hexversion -_PY_VERSION = sys.version.split()[0] -_PY_VERSION_SHORT = sys.version[:3] -_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2] -_PREFIX = os.path.normpath(sys.prefix) -_EXEC_PREFIX = os.path.normpath(sys.exec_prefix) -_CONFIG_VARS = None -_USER_BASE = None - - -def _subst_vars(path, local_vars): - """In the string `path`, replace tokens like {some.thing} with the - corresponding value from the map `local_vars`. 
- - If there is no corresponding value, leave the token unchanged. - """ - def _replacer(matchobj): - name = matchobj.group(1) - if name in local_vars: - return local_vars[name] - elif name in os.environ: - return os.environ[name] - return matchobj.group(0) - return _VAR_REPL.sub(_replacer, path) - - -def _extend_dict(target_dict, other_dict): - target_keys = target_dict.keys() - for key, value in other_dict.items(): - if key in target_keys: - continue - target_dict[key] = value - - -def _expand_vars(scheme, vars): - res = {} - if vars is None: - vars = {} - _extend_dict(vars, get_config_vars()) - - for key, value in _SCHEMES.items(scheme): - if os.name in ('posix', 'nt'): - value = os.path.expanduser(value) - res[key] = os.path.normpath(_subst_vars(value, vars)) - return res - - -def format_value(value, vars): - def _replacer(matchobj): - name = matchobj.group(1) - if name in vars: - return vars[name] - return matchobj.group(0) - return _VAR_REPL.sub(_replacer, value) - - -def _get_default_scheme(): - if os.name == 'posix': - # the default scheme for posix is posix_prefix - return 'posix_prefix' - return os.name - - -def _getuserbase(): - env_base = os.environ.get("PYTHONUSERBASE", None) - - def joinuser(*args): - return os.path.expanduser(os.path.join(*args)) - - # what about 'os2emx', 'riscos' ? - if os.name == "nt": - base = os.environ.get("APPDATA") or "~" - if env_base: - return env_base - else: - return joinuser(base, "Python") - - if sys.platform == "darwin": - framework = get_config_var("PYTHONFRAMEWORK") - if framework: - if env_base: - return env_base - else: - return joinuser("~", "Library", framework, "%d.%d" % - sys.version_info[:2]) - - if env_base: - return env_base - else: - return joinuser("~", ".local") - - -def _parse_makefile(filename, vars=None): - """Parse a Makefile-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. - """ - # Regexes needed for parsing Makefile (and similar syntaxes, - # like old-style Setup files). - _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)") - _findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)") - _findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}") - - if vars is None: - vars = {} - done = {} - notdone = {} - - with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f: - lines = f.readlines() - - for line in lines: - if line.startswith('#') or line.strip() == '': - continue - m = _variable_rx.match(line) - if m: - n, v = m.group(1, 2) - v = v.strip() - # `$$' is a literal `$' in make - tmpv = v.replace('$$', '') - - if "$" in tmpv: - notdone[n] = v - else: - try: - v = int(v) - except ValueError: - # insert literal `$' - done[n] = v.replace('$$', '$') - else: - done[n] = v - - # do variable interpolation here - variables = list(notdone.keys()) - - # Variables with a 'PY_' prefix in the makefile. These need to - # be made available without that prefix through sysconfig. - # Special care is needed to ensure that variable expansion works, even - # if the expansion uses the name without a prefix. 
- renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS') - - while len(variables) > 0: - for name in tuple(variables): - value = notdone[name] - m = _findvar1_rx.search(value) or _findvar2_rx.search(value) - if m is not None: - n = m.group(1) - found = True - if n in done: - item = str(done[n]) - elif n in notdone: - # get it on a subsequent round - found = False - elif n in os.environ: - # do it like make: fall back to environment - item = os.environ[n] - - elif n in renamed_variables: - if (name.startswith('PY_') and - name[3:] in renamed_variables): - item = "" - - elif 'PY_' + n in notdone: - found = False - - else: - item = str(done['PY_' + n]) - - else: - done[n] = item = "" - - if found: - after = value[m.end():] - value = value[:m.start()] + item + after - if "$" in after: - notdone[name] = value - else: - try: - value = int(value) - except ValueError: - done[name] = value.strip() - else: - done[name] = value - variables.remove(name) - - if (name.startswith('PY_') and - name[3:] in renamed_variables): - - name = name[3:] - if name not in done: - done[name] = value - - else: - # bogus variable reference (e.g. "prefix=$/opt/python"); - # just drop it since we can't deal - done[name] = value - variables.remove(name) - - # strip spurious spaces - for k, v in done.items(): - if isinstance(v, str): - done[k] = v.strip() - - # save the results in the global dictionary - vars.update(done) - return vars - - -def get_makefile_filename(): - """Return the path of the Makefile.""" - if _PYTHON_BUILD: - return os.path.join(_PROJECT_BASE, "Makefile") - if hasattr(sys, 'abiflags'): - config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags) - else: - config_dir_name = 'config' - return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile') - - -def _init_posix(vars): - """Initialize the module as appropriate for POSIX systems.""" - # load the installed Makefile: - makefile = get_makefile_filename() - try: - _parse_makefile(makefile, vars) - except IOError as e: - msg = "invalid Python installation: unable to open %s" % makefile - if hasattr(e, "strerror"): - msg = msg + " (%s)" % e.strerror - raise IOError(msg) - # load the installed pyconfig.h: - config_h = get_config_h_filename() - try: - with open(config_h) as f: - parse_config_h(f, vars) - except IOError as e: - msg = "invalid Python installation: unable to open %s" % config_h - if hasattr(e, "strerror"): - msg = msg + " (%s)" % e.strerror - raise IOError(msg) - # On AIX, there are wrong paths to the linker scripts in the Makefile - # -- these paths are relative to the Python source, but when installed - # the scripts are in another directory. - if _PYTHON_BUILD: - vars['LDSHARED'] = vars['BLDSHARED'] - - -def _init_non_posix(vars): - """Initialize the module as appropriate for NT""" - # set basic install directories - vars['LIBDEST'] = get_path('stdlib') - vars['BINLIBDEST'] = get_path('platstdlib') - vars['INCLUDEPY'] = get_path('include') - vars['SO'] = '.pyd' - vars['EXE'] = '.exe' - vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT - vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable)) - -# -# public APIs -# - - -def parse_config_h(fp, vars=None): - """Parse a config.h-style file. - - A dictionary containing name/value pairs is returned. If an - optional dictionary is passed in as the second argument, it is - used instead of a new dictionary. 
- """ - if vars is None: - vars = {} - define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n") - undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n") - - while True: - line = fp.readline() - if not line: - break - m = define_rx.match(line) - if m: - n, v = m.group(1, 2) - try: - v = int(v) - except ValueError: - pass - vars[n] = v - else: - m = undef_rx.match(line) - if m: - vars[m.group(1)] = 0 - return vars - - -def get_config_h_filename(): - """Return the path of pyconfig.h.""" - if _PYTHON_BUILD: - if os.name == "nt": - inc_dir = os.path.join(_PROJECT_BASE, "PC") - else: - inc_dir = _PROJECT_BASE - else: - inc_dir = get_path('platinclude') - return os.path.join(inc_dir, 'pyconfig.h') - - -def get_scheme_names(): - """Return a tuple containing the schemes names.""" - return tuple(sorted(_SCHEMES.sections())) - - -def get_path_names(): - """Return a tuple containing the paths names.""" - # xxx see if we want a static list - return _SCHEMES.options('posix_prefix') - - -def get_paths(scheme=_get_default_scheme(), vars=None, expand=True): - """Return a mapping containing an install scheme. - - ``scheme`` is the install scheme name. If not provided, it will - return the default scheme for the current platform. - """ - _ensure_cfg_read() - if expand: - return _expand_vars(scheme, vars) - else: - return dict(_SCHEMES.items(scheme)) - - -def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True): - """Return a path corresponding to the scheme. - - ``scheme`` is the install scheme name. - """ - return get_paths(scheme, vars, expand)[name] - - -def get_config_vars(*args): - """With no arguments, return a dictionary of all configuration - variables relevant for the current platform. - - On Unix, this means every variable defined in Python's installed Makefile; - On Windows and Mac OS it's a much smaller set. - - With arguments, return a list of values that result from looking up - each argument in the configuration variable dictionary. - """ - global _CONFIG_VARS - if _CONFIG_VARS is None: - _CONFIG_VARS = {} - # Normalized versions of prefix and exec_prefix are handy to have; - # in fact, these are the standard versions used most places in the - # distutils2 module. - _CONFIG_VARS['prefix'] = _PREFIX - _CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX - _CONFIG_VARS['py_version'] = _PY_VERSION - _CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT - _CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2] - _CONFIG_VARS['base'] = _PREFIX - _CONFIG_VARS['platbase'] = _EXEC_PREFIX - _CONFIG_VARS['projectbase'] = _PROJECT_BASE - try: - _CONFIG_VARS['abiflags'] = sys.abiflags - except AttributeError: - # sys.abiflags may not be defined on all platforms. - _CONFIG_VARS['abiflags'] = '' - - if os.name in ('nt', 'os2'): - _init_non_posix(_CONFIG_VARS) - if os.name == 'posix': - _init_posix(_CONFIG_VARS) - # Setting 'userbase' is done below the call to the - # init function to enable using 'get_config_var' in - # the init-function. - if sys.version >= '2.6': - _CONFIG_VARS['userbase'] = _getuserbase() - - if 'srcdir' not in _CONFIG_VARS: - _CONFIG_VARS['srcdir'] = _PROJECT_BASE - else: - _CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir']) - - # Convert srcdir into an absolute path if it appears necessary. - # Normally it is relative to the build directory. However, during - # testing, for example, we might be running a non-installed python - # from a different directory. 
- if _PYTHON_BUILD and os.name == "posix": - base = _PROJECT_BASE - try: - cwd = os.getcwd() - except OSError: - cwd = None - if (not os.path.isabs(_CONFIG_VARS['srcdir']) and - base != cwd): - # srcdir is relative and we are not in the same directory - # as the executable. Assume executable is in the build - # directory and make srcdir absolute. - srcdir = os.path.join(base, _CONFIG_VARS['srcdir']) - _CONFIG_VARS['srcdir'] = os.path.normpath(srcdir) - - if sys.platform == 'darwin': - kernel_version = os.uname()[2] # Kernel version (8.4.3) - major_version = int(kernel_version.split('.')[0]) - - if major_version < 8: - # On Mac OS X before 10.4, check if -arch and -isysroot - # are in CFLAGS or LDFLAGS and remove them if they are. - # This is needed when building extensions on a 10.3 system - # using a universal build of python. - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - flags = _CONFIG_VARS[key] - flags = re.sub(r'-arch\s+\w+\s', ' ', flags) - flags = re.sub('-isysroot [^ \t]*', ' ', flags) - _CONFIG_VARS[key] = flags - else: - # Allow the user to override the architecture flags using - # an environment variable. - # NOTE: This name was introduced by Apple in OSX 10.5 and - # is used by several scripting languages distributed with - # that OS release. - if 'ARCHFLAGS' in os.environ: - arch = os.environ['ARCHFLAGS'] - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - - flags = _CONFIG_VARS[key] - flags = re.sub(r'-arch\s+\w+\s', ' ', flags) - flags = flags + ' ' + arch - _CONFIG_VARS[key] = flags - - # If we're on OSX 10.5 or later and the user tries to - # compiles an extension using an SDK that is not present - # on the current machine it is better to not use an SDK - # than to fail. - # - # The major usecase for this is users using a Python.org - # binary installer on OSX 10.6: that installer uses - # the 10.4u SDK, but that SDK is not installed by default - # when you install Xcode. - # - CFLAGS = _CONFIG_VARS.get('CFLAGS', '') - m = re.search(r'-isysroot\s+(\S+)', CFLAGS) - if m is not None: - sdk = m.group(1) - if not os.path.exists(sdk): - for key in ('LDFLAGS', 'BASECFLAGS', - # a number of derived variables. These need to be - # patched up as well. - 'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'): - - flags = _CONFIG_VARS[key] - flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags) - _CONFIG_VARS[key] = flags - - if args: - vals = [] - for name in args: - vals.append(_CONFIG_VARS.get(name)) - return vals - else: - return _CONFIG_VARS - - -def get_config_var(name): - """Return the value of a single variable using the dictionary returned by - 'get_config_vars()'. - - Equivalent to get_config_vars().get(name) - """ - return get_config_vars().get(name) - - -def get_platform(): - """Return a string that identifies the current platform. - - This is used mainly to distinguish platform-specific build directories and - platform-specific built distributions. Typically includes the OS name - and version and the architecture (as supplied by 'os.uname()'), - although the exact information included depends on the OS; eg. for IRIX - the architecture isn't particularly important (IRIX only runs on SGI - hardware), but for Linux the kernel version isn't particularly - important. - - Examples of returned values: - linux-i586 - linux-alpha (?) 
- solaris-2.6-sun4u - irix-5.3 - irix64-6.2 - - Windows will return one of: - win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) - win-ia64 (64bit Windows on Itanium) - win32 (all others - specifically, sys.platform is returned) - - For other non-POSIX platforms, currently just returns 'sys.platform'. - """ - if os.name == 'nt': - # sniff sys.version for architecture. - prefix = " bit (" - i = sys.version.find(prefix) - if i == -1: - return sys.platform - j = sys.version.find(")", i) - look = sys.version[i+len(prefix):j].lower() - if look == 'amd64': - return 'win-amd64' - if look == 'itanium': - return 'win-ia64' - return sys.platform - - if os.name != "posix" or not hasattr(os, 'uname'): - # XXX what about the architecture? NT is Intel or Alpha, - # Mac OS is M68k or PPC, etc. - return sys.platform - - # Try to distinguish various flavours of Unix - osname, host, release, version, machine = os.uname() - - # Convert the OS name to lowercase, remove '/' characters - # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh") - osname = osname.lower().replace('/', '') - machine = machine.replace(' ', '_') - machine = machine.replace('/', '-') - - if osname[:5] == "linux": - # At least on Linux/Intel, 'machine' is the processor -- - # i386, etc. - # XXX what about Alpha, SPARC, etc? - return "%s-%s" % (osname, machine) - elif osname[:5] == "sunos": - if release[0] >= "5": # SunOS 5 == Solaris 2 - osname = "solaris" - release = "%d.%s" % (int(release[0]) - 3, release[2:]) - # fall through to standard osname-release-machine representation - elif osname[:4] == "irix": # could be "irix64"! - return "%s-%s" % (osname, release) - elif osname[:3] == "aix": - return "%s-%s.%s" % (osname, version, release) - elif osname[:6] == "cygwin": - osname = "cygwin" - rel_re = re.compile(r'[\d.]+') - m = rel_re.match(release) - if m: - release = m.group() - elif osname[:6] == "darwin": - # - # For our purposes, we'll assume that the system version from - # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set - # to. This makes the compatibility story a bit more sane because the - # machine is going to compile and link as if it were - # MACOSX_DEPLOYMENT_TARGET. - cfgvars = get_config_vars() - macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET') - - if True: - # Always calculate the release of the running machine, - # needed to determine if we can build fat binaries or not. - - macrelease = macver - # Get the system version. Reading this plist is a documented - # way to get the system version (see the documentation for - # the Gestalt Manager) - try: - f = open('/System/Library/CoreServices/SystemVersion.plist') - except IOError: - # We're on a plain darwin box, fall back to the default - # behaviour. - pass - else: - try: - m = re.search(r'ProductUserVisibleVersion\s*' - r'(.*?)', f.read()) - finally: - f.close() - if m is not None: - macrelease = '.'.join(m.group(1).split('.')[:2]) - # else: fall back to the default behaviour - - if not macver: - macver = macrelease - - if macver: - release = macver - osname = "macosx" - - if ((macrelease + '.') >= '10.4.' and - '-arch' in get_config_vars().get('CFLAGS', '').strip()): - # The universal build will build fat binaries, but not on - # systems before 10.4 - # - # Try to detect 4-way universal builds, those have machine-type - # 'universal' instead of 'fat'. 
- - machine = 'fat' - cflags = get_config_vars().get('CFLAGS') - - archs = re.findall(r'-arch\s+(\S+)', cflags) - archs = tuple(sorted(set(archs))) - - if len(archs) == 1: - machine = archs[0] - elif archs == ('i386', 'ppc'): - machine = 'fat' - elif archs == ('i386', 'x86_64'): - machine = 'intel' - elif archs == ('i386', 'ppc', 'x86_64'): - machine = 'fat3' - elif archs == ('ppc64', 'x86_64'): - machine = 'fat64' - elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'): - machine = 'universal' - else: - raise ValueError( - "Don't know machine value for archs=%r" % (archs,)) - - elif machine == 'i386': - # On OSX the machine type returned by uname is always the - # 32-bit variant, even if the executable architecture is - # the 64-bit variant - if sys.maxsize >= 2**32: - machine = 'x86_64' - - elif machine in ('PowerPC', 'Power_Macintosh'): - # Pick a sane name for the PPC architecture. - # See 'i386' case - if sys.maxsize >= 2**32: - machine = 'ppc64' - else: - machine = 'ppc' - - return "%s-%s-%s" % (osname, release, machine) - - -def get_python_version(): - return _PY_VERSION_SHORT - - -def _print_dict(title, data): - for index, (key, value) in enumerate(sorted(data.items())): - if index == 0: - print('%s: ' % (title)) - print('\t%s = "%s"' % (key, value)) - - -def _main(): - """Display all information sysconfig detains.""" - print('Platform: "%s"' % get_platform()) - print('Python version: "%s"' % get_python_version()) - print('Current installation scheme: "%s"' % _get_default_scheme()) - print() - _print_dict('Paths', get_paths()) - print() - _print_dict('Variables', get_config_vars()) - - -if __name__ == '__main__': - _main() diff --git a/src/rez/vendor/distlib/_backport/tarfile.py b/src/rez/vendor/distlib/_backport/tarfile.py deleted file mode 100644 index d66d85663..000000000 --- a/src/rez/vendor/distlib/_backport/tarfile.py +++ /dev/null @@ -1,2607 +0,0 @@ -#------------------------------------------------------------------- -# tarfile.py -#------------------------------------------------------------------- -# Copyright (C) 2002 Lars Gustaebel -# All rights reserved. -# -# Permission is hereby granted, free of charge, to any person -# obtaining a copy of this software and associated documentation -# files (the "Software"), to deal in the Software without -# restriction, including without limitation the rights to use, -# copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following -# conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -from __future__ import print_function - -"""Read from and write to tar format archives. 
-""" - -__version__ = "$Revision$" - -version = "0.9.0" -__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)" -__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $" -__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $" -__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend." - -#--------- -# Imports -#--------- -import sys -import os -import stat -import errno -import time -import struct -import copy -import re - -try: - import grp, pwd -except ImportError: - grp = pwd = None - -# os.symlink on Windows prior to 6.0 raises NotImplementedError -symlink_exception = (AttributeError, NotImplementedError) -try: - # WindowsError (1314) will be raised if the caller does not hold the - # SeCreateSymbolicLinkPrivilege privilege - symlink_exception += (WindowsError,) -except NameError: - pass - -# from tarfile import * -__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"] - -if sys.version_info[0] < 3: - import __builtin__ as builtins -else: - import builtins - -_open = builtins.open # Since 'open' is TarFile.open - -#--------------------------------------------------------- -# tar constants -#--------------------------------------------------------- -NUL = b"\0" # the null character -BLOCKSIZE = 512 # length of processing blocks -RECORDSIZE = BLOCKSIZE * 20 # length of records -GNU_MAGIC = b"ustar \0" # magic gnu tar string -POSIX_MAGIC = b"ustar\x0000" # magic posix tar string - -LENGTH_NAME = 100 # maximum length of a filename -LENGTH_LINK = 100 # maximum length of a linkname -LENGTH_PREFIX = 155 # maximum length of the prefix field - -REGTYPE = b"0" # regular file -AREGTYPE = b"\0" # regular file -LNKTYPE = b"1" # link (inside tarfile) -SYMTYPE = b"2" # symbolic link -CHRTYPE = b"3" # character special device -BLKTYPE = b"4" # block special device -DIRTYPE = b"5" # directory -FIFOTYPE = b"6" # fifo special device -CONTTYPE = b"7" # contiguous file - -GNUTYPE_LONGNAME = b"L" # GNU tar longname -GNUTYPE_LONGLINK = b"K" # GNU tar longlink -GNUTYPE_SPARSE = b"S" # GNU tar sparse file - -XHDTYPE = b"x" # POSIX.1-2001 extended header -XGLTYPE = b"g" # POSIX.1-2001 global header -SOLARIS_XHDTYPE = b"X" # Solaris extended header - -USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format -GNU_FORMAT = 1 # GNU tar format -PAX_FORMAT = 2 # POSIX.1-2001 (pax) format -DEFAULT_FORMAT = GNU_FORMAT - -#--------------------------------------------------------- -# tarfile constants -#--------------------------------------------------------- -# File types that tarfile supports: -SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE, - SYMTYPE, DIRTYPE, FIFOTYPE, - CONTTYPE, CHRTYPE, BLKTYPE, - GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, - GNUTYPE_SPARSE) - -# File types that will be treated as a regular file. -REGULAR_TYPES = (REGTYPE, AREGTYPE, - CONTTYPE, GNUTYPE_SPARSE) - -# File types that are part of the GNU tar format. -GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK, - GNUTYPE_SPARSE) - -# Fields from a pax header that override a TarInfo attribute. -PAX_FIELDS = ("path", "linkpath", "size", "mtime", - "uid", "gid", "uname", "gname") - -# Fields from a pax header that are affected by hdrcharset. -PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname")) - -# Fields in a pax header that are numbers, all other fields -# are treated as strings. 
-PAX_NUMBER_FIELDS = { - "atime": float, - "ctime": float, - "mtime": float, - "uid": int, - "gid": int, - "size": int -} - -#--------------------------------------------------------- -# Bits used in the mode field, values in octal. -#--------------------------------------------------------- -S_IFLNK = 0o120000 # symbolic link -S_IFREG = 0o100000 # regular file -S_IFBLK = 0o060000 # block device -S_IFDIR = 0o040000 # directory -S_IFCHR = 0o020000 # character device -S_IFIFO = 0o010000 # fifo - -TSUID = 0o4000 # set UID on execution -TSGID = 0o2000 # set GID on execution -TSVTX = 0o1000 # reserved - -TUREAD = 0o400 # read by owner -TUWRITE = 0o200 # write by owner -TUEXEC = 0o100 # execute/search by owner -TGREAD = 0o040 # read by group -TGWRITE = 0o020 # write by group -TGEXEC = 0o010 # execute/search by group -TOREAD = 0o004 # read by other -TOWRITE = 0o002 # write by other -TOEXEC = 0o001 # execute/search by other - -#--------------------------------------------------------- -# initialization -#--------------------------------------------------------- -if os.name in ("nt", "ce"): - ENCODING = "utf-8" -else: - ENCODING = sys.getfilesystemencoding() - -#--------------------------------------------------------- -# Some useful functions -#--------------------------------------------------------- - -def stn(s, length, encoding, errors): - """Convert a string to a null-terminated bytes object. - """ - s = s.encode(encoding, errors) - return s[:length] + (length - len(s)) * NUL - -def nts(s, encoding, errors): - """Convert a null-terminated bytes object to a string. - """ - p = s.find(b"\0") - if p != -1: - s = s[:p] - return s.decode(encoding, errors) - -def nti(s): - """Convert a number field to a python number. - """ - # There are two possible encodings for a number field, see - # itn() below. - if s[0] != chr(0o200): - try: - n = int(nts(s, "ascii", "strict") or "0", 8) - except ValueError: - raise InvalidHeaderError("invalid header") - else: - n = 0 - for i in range(len(s) - 1): - n <<= 8 - n += ord(s[i + 1]) - return n - -def itn(n, digits=8, format=DEFAULT_FORMAT): - """Convert a python number to a number field. - """ - # POSIX 1003.1-1988 requires numbers to be encoded as a string of - # octal digits followed by a null-byte, this allows values up to - # (8**(digits-1))-1. GNU tar allows storing numbers greater than - # that if necessary. A leading 0o200 byte indicates this particular - # encoding, the following digits-1 bytes are a big-endian - # representation. This allows values up to (256**(digits-1))-1. - if 0 <= n < 8 ** (digits - 1): - s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL - else: - if format != GNU_FORMAT or n >= 256 ** (digits - 1): - raise ValueError("overflow in number field") - - if n < 0: - # XXX We mimic GNU tar's behaviour with negative numbers, - # this could raise OverflowError. - n = struct.unpack("L", struct.pack("l", n))[0] - - s = bytearray() - for i in range(digits - 1): - s.insert(0, n & 0o377) - n >>= 8 - s.insert(0, 0o200) - return s - -def calc_chksums(buf): - """Calculate the checksum for a member's header by summing up all - characters except for the chksum field which is treated as if - it was filled with spaces. According to the GNU tar sources, - some tars (Sun and NeXT) calculate chksum with signed char, - which will be different if there are chars in the buffer with - the high bit set. So we calculate two checksums, unsigned and - signed. 
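To make the two encodings in `itn()`/`nti()` concrete: in-range values are stored as NUL-terminated octal ASCII, and only out-of-range values fall back to GNU tar's base-256 form marked by a leading 0o200 byte. A minimal sketch of the octal round trip:

```python
NUL = b"\0"
n, digits = 0o644, 8

# Encode as itn() does for values that fit in the field:
field = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
print(field)  # b'0000644\x00'

# Decode as nti() does for the octal form:
assert int(field.rstrip(NUL).decode("ascii"), 8) == n
```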
- """ - unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512])) - signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512])) - return unsigned_chksum, signed_chksum - -def copyfileobj(src, dst, length=None): - """Copy length bytes from fileobj src to fileobj dst. - If length is None, copy the entire content. - """ - if length == 0: - return - if length is None: - while True: - buf = src.read(16*1024) - if not buf: - break - dst.write(buf) - return - - BUFSIZE = 16 * 1024 - blocks, remainder = divmod(length, BUFSIZE) - for b in range(blocks): - buf = src.read(BUFSIZE) - if len(buf) < BUFSIZE: - raise IOError("end of file reached") - dst.write(buf) - - if remainder != 0: - buf = src.read(remainder) - if len(buf) < remainder: - raise IOError("end of file reached") - dst.write(buf) - return - -filemode_table = ( - ((S_IFLNK, "l"), - (S_IFREG, "-"), - (S_IFBLK, "b"), - (S_IFDIR, "d"), - (S_IFCHR, "c"), - (S_IFIFO, "p")), - - ((TUREAD, "r"),), - ((TUWRITE, "w"),), - ((TUEXEC|TSUID, "s"), - (TSUID, "S"), - (TUEXEC, "x")), - - ((TGREAD, "r"),), - ((TGWRITE, "w"),), - ((TGEXEC|TSGID, "s"), - (TSGID, "S"), - (TGEXEC, "x")), - - ((TOREAD, "r"),), - ((TOWRITE, "w"),), - ((TOEXEC|TSVTX, "t"), - (TSVTX, "T"), - (TOEXEC, "x")) -) - -def filemode(mode): - """Convert a file's mode to a string of the form - -rwxrwxrwx. - Used by TarFile.list() - """ - perm = [] - for table in filemode_table: - for bit, char in table: - if mode & bit == bit: - perm.append(char) - break - else: - perm.append("-") - return "".join(perm) - -class TarError(Exception): - """Base exception.""" - pass -class ExtractError(TarError): - """General exception for extract errors.""" - pass -class ReadError(TarError): - """Exception for unreadable tar archives.""" - pass -class CompressionError(TarError): - """Exception for unavailable compression methods.""" - pass -class StreamError(TarError): - """Exception for unsupported operations on stream-like TarFiles.""" - pass -class HeaderError(TarError): - """Base exception for header errors.""" - pass -class EmptyHeaderError(HeaderError): - """Exception for empty headers.""" - pass -class TruncatedHeaderError(HeaderError): - """Exception for truncated headers.""" - pass -class EOFHeaderError(HeaderError): - """Exception for end of file headers.""" - pass -class InvalidHeaderError(HeaderError): - """Exception for invalid headers.""" - pass -class SubsequentHeaderError(HeaderError): - """Exception for missing and invalid extended headers.""" - pass - -#--------------------------- -# internal stream interface -#--------------------------- -class _LowLevelFile(object): - """Low-level file object. Supports reading and writing. - It is used instead of a regular file object for streaming - access. - """ - - def __init__(self, name, mode): - mode = { - "r": os.O_RDONLY, - "w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC, - }[mode] - if hasattr(os, "O_BINARY"): - mode |= os.O_BINARY - self.fd = os.open(name, mode, 0o666) - - def close(self): - os.close(self.fd) - - def read(self, size): - return os.read(self.fd, size) - - def write(self, s): - os.write(self.fd, s) - -class _Stream(object): - """Class that serves as an adapter between TarFile and - a stream-like object. The stream-like object only - needs to have a read() or write() method and is accessed - blockwise. Use of gzip or bzip2 compression is possible. - A stream-like object could be for example: sys.stdin, - sys.stdout, a socket, a tape device etc. 
-
-       _Stream is intended to be used only internally.
-    """
-
-    def __init__(self, name, mode, comptype, fileobj, bufsize):
-        """Construct a _Stream object.
-        """
-        self._extfileobj = True
-        if fileobj is None:
-            fileobj = _LowLevelFile(name, mode)
-            self._extfileobj = False
-
-        if comptype == '*':
-            # Enable transparent compression detection for the
-            # stream interface
-            fileobj = _StreamProxy(fileobj)
-            comptype = fileobj.getcomptype()
-
-        self.name     = name or ""
-        self.mode     = mode
-        self.comptype = comptype
-        self.fileobj  = fileobj
-        self.bufsize  = bufsize
-        self.buf      = b""
-        self.pos      = 0
-        self.closed   = False
-
-        try:
-            if comptype == "gz":
-                try:
-                    import zlib
-                except ImportError:
-                    raise CompressionError("zlib module is not available")
-                self.zlib = zlib
-                self.crc = zlib.crc32(b"")
-                if mode == "r":
-                    self._init_read_gz()
-                else:
-                    self._init_write_gz()
-
-            if comptype == "bz2":
-                try:
-                    import bz2
-                except ImportError:
-                    raise CompressionError("bz2 module is not available")
-                if mode == "r":
-                    self.dbuf = b""
-                    self.cmp = bz2.BZ2Decompressor()
-                else:
-                    self.cmp = bz2.BZ2Compressor()
-        except:
-            if not self._extfileobj:
-                self.fileobj.close()
-            self.closed = True
-            raise
-
-    def __del__(self):
-        if hasattr(self, "closed") and not self.closed:
-            self.close()
-
-    def _init_write_gz(self):
-        """Initialize for writing with gzip compression.
-        """
-        self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-                                         -self.zlib.MAX_WBITS,
-                                         self.zlib.DEF_MEM_LEVEL,
-                                         0)
-        timestamp = struct.pack("<L", int(time.time()))
-        self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
-        if self.name.endswith(".gz"):
-            self.name = self.name[:-3]
-        # RFC1952 says we must use ISO-8859-1 for the FNAME field.
-        self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
-
-    def write(self, s):
-        """Write string s to the stream.
-        """
-        if self.comptype == "gz":
-            self.crc = self.zlib.crc32(s, self.crc)
-        self.pos += len(s)
-        if self.comptype != "tar":
-            s = self.cmp.compress(s)
-        self.__write(s)
-
-    def __write(self, s):
-        """Write string s to the stream if a whole new block
-           is ready to be written.
-        """
-        self.buf += s
-        while len(self.buf) > self.bufsize:
-            self.fileobj.write(self.buf[:self.bufsize])
-            self.buf = self.buf[self.bufsize:]
-
-    def close(self):
-        """Close the _Stream object. No operation should be
-           done on it afterwards.
-        """
-        if self.closed:
-            return
-
-        if self.mode == "w" and self.comptype != "tar":
-            self.buf += self.cmp.flush()
-
-        if self.mode == "w" and self.buf:
-            self.fileobj.write(self.buf)
-            self.buf = b""
-            if self.comptype == "gz":
-                # The native zlib crc is an unsigned 32-bit integer, but
-                # the Python wrapper implicitly casts that to a signed C
-                # long. So, on a 32-bit box self.crc may "look negative",
-                # while the same crc on a 64-bit box may "look positive".
-                # To avoid irksome warnings from the `struct` module, force
-                # it to look positive on all boxes.
-                self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
-                self.fileobj.write(struct.pack("<L", self.pos & 0o7777777777))
-
-        if not self._extfileobj:
-            self.fileobj.close()
-
-        self.closed = True
-
-    def _init_read_gz(self):
-        """Initialize for reading a gzip compressed fileobj.
-        """
-        self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
-        self.dbuf = b""
-
-        # taken from gzip.GzipFile with some alterations
-        if self.__read(2) != b"\037\213":
-            raise ReadError("not a gzip file")
-        if self.__read(1) != b"\010":
-            raise CompressionError("unsupported compression method")
-
-        flag = ord(self.__read(1))
-        self.__read(6)
-
-        if flag & 4:
-            xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
-            self.read(xlen)
-        if flag & 8:
-            while True:
-                s = self.__read(1)
-                if not s or s == NUL:
-                    break
-        if flag & 16:
-            while True:
-                s = self.__read(1)
-                if not s or s == NUL:
-                    break
-        if flag & 2:
-            self.__read(2)
-
-    def tell(self):
-        """Return the stream's file pointer position.
-        """
-        return self.pos
-
-    def seek(self, pos=0):
-        """Set the stream's file pointer to pos. Negative seeking
-           is forbidden.
-        """
-        if pos - self.pos >= 0:
-            blocks, remainder = divmod(pos - self.pos, self.bufsize)
-            for i in range(blocks):
-                self.read(self.bufsize)
-            self.read(remainder)
-        else:
-            raise StreamError("seeking backwards is not allowed")
-        return self.pos
-
-    def read(self, size=None):
-        """Return the next size number of bytes from the stream.
-           If size is not defined, return all bytes of the stream
-           up to EOF.
-        """
-        if size is None:
-            t = []
-            while True:
-                buf = self._read(self.bufsize)
-                if not buf:
-                    break
-                t.append(buf)
-            buf = "".join(t)
-        else:
-            buf = self._read(size)
-        self.pos += len(buf)
-        return buf
-
-    def _read(self, size):
-        """Return size bytes from the stream.
-        """
-        if self.comptype == "tar":
-            return self.__read(size)
-
-        c = len(self.dbuf)
-        while c < size:
-            buf = self.__read(self.bufsize)
-            if not buf:
-                break
-            try:
-                buf = self.cmp.decompress(buf)
-            except IOError:
-                raise ReadError("invalid compressed data")
-            self.dbuf += buf
-            c += len(buf)
-        buf = self.dbuf[:size]
-        self.dbuf = self.dbuf[size:]
-        return buf
-
-    def __read(self, size):
-        """Return size bytes from stream. If internal buffer is empty,
-           read another block from the stream.
- """ - c = len(self.buf) - while c < size: - buf = self.fileobj.read(self.bufsize) - if not buf: - break - self.buf += buf - c += len(buf) - buf = self.buf[:size] - self.buf = self.buf[size:] - return buf -# class _Stream - -class _StreamProxy(object): - """Small proxy class that enables transparent compression - detection for the Stream interface (mode 'r|*'). - """ - - def __init__(self, fileobj): - self.fileobj = fileobj - self.buf = self.fileobj.read(BLOCKSIZE) - - def read(self, size): - self.read = self.fileobj.read - return self.buf - - def getcomptype(self): - if self.buf.startswith(b"\037\213\010"): - return "gz" - if self.buf.startswith(b"BZh91"): - return "bz2" - return "tar" - - def close(self): - self.fileobj.close() -# class StreamProxy - -class _BZ2Proxy(object): - """Small proxy class that enables external file object - support for "r:bz2" and "w:bz2" modes. This is actually - a workaround for a limitation in bz2 module's BZ2File - class which (unlike gzip.GzipFile) has no support for - a file object argument. - """ - - blocksize = 16 * 1024 - - def __init__(self, fileobj, mode): - self.fileobj = fileobj - self.mode = mode - self.name = getattr(self.fileobj, "name", None) - self.init() - - def init(self): - import bz2 - self.pos = 0 - if self.mode == "r": - self.bz2obj = bz2.BZ2Decompressor() - self.fileobj.seek(0) - self.buf = b"" - else: - self.bz2obj = bz2.BZ2Compressor() - - def read(self, size): - x = len(self.buf) - while x < size: - raw = self.fileobj.read(self.blocksize) - if not raw: - break - data = self.bz2obj.decompress(raw) - self.buf += data - x += len(data) - - buf = self.buf[:size] - self.buf = self.buf[size:] - self.pos += len(buf) - return buf - - def seek(self, pos): - if pos < self.pos: - self.init() - self.read(pos - self.pos) - - def tell(self): - return self.pos - - def write(self, data): - self.pos += len(data) - raw = self.bz2obj.compress(data) - self.fileobj.write(raw) - - def close(self): - if self.mode == "w": - raw = self.bz2obj.flush() - self.fileobj.write(raw) -# class _BZ2Proxy - -#------------------------ -# Extraction file object -#------------------------ -class _FileInFile(object): - """A thin wrapper around an existing file object that - provides a part of its data as an individual file - object. - """ - - def __init__(self, fileobj, offset, size, blockinfo=None): - self.fileobj = fileobj - self.offset = offset - self.size = size - self.position = 0 - - if blockinfo is None: - blockinfo = [(0, size)] - - # Construct a map with data and zero blocks. - self.map_index = 0 - self.map = [] - lastpos = 0 - realpos = self.offset - for offset, size in blockinfo: - if offset > lastpos: - self.map.append((False, lastpos, offset, None)) - self.map.append((True, offset, offset + size, realpos)) - realpos += size - lastpos = offset + size - if lastpos < self.size: - self.map.append((False, lastpos, self.size, None)) - - def seekable(self): - if not hasattr(self.fileobj, "seekable"): - # XXX gzip.GzipFile and bz2.BZ2File - return True - return self.fileobj.seekable() - - def tell(self): - """Return the current file position. - """ - return self.position - - def seek(self, position): - """Seek to a position in the file. - """ - self.position = position - - def read(self, size=None): - """Read data from the file. 
- """ - if size is None: - size = self.size - self.position - else: - size = min(size, self.size - self.position) - - buf = b"" - while size > 0: - while True: - data, start, stop, offset = self.map[self.map_index] - if start <= self.position < stop: - break - else: - self.map_index += 1 - if self.map_index == len(self.map): - self.map_index = 0 - length = min(size, stop - self.position) - if data: - self.fileobj.seek(offset + (self.position - start)) - buf += self.fileobj.read(length) - else: - buf += NUL * length - size -= length - self.position += length - return buf -#class _FileInFile - - -class ExFileObject(object): - """File-like object for reading an archive member. - Is returned by TarFile.extractfile(). - """ - blocksize = 1024 - - def __init__(self, tarfile, tarinfo): - self.fileobj = _FileInFile(tarfile.fileobj, - tarinfo.offset_data, - tarinfo.size, - tarinfo.sparse) - self.name = tarinfo.name - self.mode = "r" - self.closed = False - self.size = tarinfo.size - - self.position = 0 - self.buffer = b"" - - def readable(self): - return True - - def writable(self): - return False - - def seekable(self): - return self.fileobj.seekable() - - def read(self, size=None): - """Read at most size bytes from the file. If size is not - present or None, read all data until EOF is reached. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - buf = b"" - if self.buffer: - if size is None: - buf = self.buffer - self.buffer = b"" - else: - buf = self.buffer[:size] - self.buffer = self.buffer[size:] - - if size is None: - buf += self.fileobj.read() - else: - buf += self.fileobj.read(size - len(buf)) - - self.position += len(buf) - return buf - - # XXX TextIOWrapper uses the read1() method. - read1 = read - - def readline(self, size=-1): - """Read one entire line from the file. If size is present - and non-negative, return a string with at most that - size, which may be an incomplete line. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - pos = self.buffer.find(b"\n") + 1 - if pos == 0: - # no newline found. - while True: - buf = self.fileobj.read(self.blocksize) - self.buffer += buf - if not buf or b"\n" in buf: - pos = self.buffer.find(b"\n") + 1 - if pos == 0: - # no newline found. - pos = len(self.buffer) - break - - if size != -1: - pos = min(size, pos) - - buf = self.buffer[:pos] - self.buffer = self.buffer[pos:] - self.position += len(buf) - return buf - - def readlines(self): - """Return a list with all remaining lines. - """ - result = [] - while True: - line = self.readline() - if not line: break - result.append(line) - return result - - def tell(self): - """Return the current file position. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - return self.position - - def seek(self, pos, whence=os.SEEK_SET): - """Seek to a position in the file. - """ - if self.closed: - raise ValueError("I/O operation on closed file") - - if whence == os.SEEK_SET: - self.position = min(max(pos, 0), self.size) - elif whence == os.SEEK_CUR: - if pos < 0: - self.position = max(self.position + pos, 0) - else: - self.position = min(self.position + pos, self.size) - elif whence == os.SEEK_END: - self.position = max(min(self.size + pos, self.size), 0) - else: - raise ValueError("Invalid argument") - - self.buffer = b"" - self.fileobj.seek(self.position) - - def close(self): - """Close the file object. - """ - self.closed = True - - def __iter__(self): - """Get an iterator over the file's lines. 
- """ - while True: - line = self.readline() - if not line: - break - yield line -#class ExFileObject - -#------------------ -# Exported Classes -#------------------ -class TarInfo(object): - """Informational class which holds the details about an - archive member given by a tar header block. - TarInfo objects are returned by TarFile.getmember(), - TarFile.getmembers() and TarFile.gettarinfo() and are - usually created internally. - """ - - __slots__ = ("name", "mode", "uid", "gid", "size", "mtime", - "chksum", "type", "linkname", "uname", "gname", - "devmajor", "devminor", - "offset", "offset_data", "pax_headers", "sparse", - "tarfile", "_sparse_structs", "_link_target") - - def __init__(self, name=""): - """Construct a TarInfo object. name is the optional name - of the member. - """ - self.name = name # member name - self.mode = 0o644 # file permissions - self.uid = 0 # user id - self.gid = 0 # group id - self.size = 0 # file size - self.mtime = 0 # modification time - self.chksum = 0 # header checksum - self.type = REGTYPE # member type - self.linkname = "" # link name - self.uname = "" # user name - self.gname = "" # group name - self.devmajor = 0 # device major number - self.devminor = 0 # device minor number - - self.offset = 0 # the tar header starts here - self.offset_data = 0 # the file's data starts here - - self.sparse = None # sparse member information - self.pax_headers = {} # pax header information - - # In pax headers the "name" and "linkname" field are called - # "path" and "linkpath". - def _getpath(self): - return self.name - def _setpath(self, name): - self.name = name - path = property(_getpath, _setpath) - - def _getlinkpath(self): - return self.linkname - def _setlinkpath(self, linkname): - self.linkname = linkname - linkpath = property(_getlinkpath, _setlinkpath) - - def __repr__(self): - return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self)) - - def get_info(self): - """Return the TarInfo's attributes as a dictionary. - """ - info = { - "name": self.name, - "mode": self.mode & 0o7777, - "uid": self.uid, - "gid": self.gid, - "size": self.size, - "mtime": self.mtime, - "chksum": self.chksum, - "type": self.type, - "linkname": self.linkname, - "uname": self.uname, - "gname": self.gname, - "devmajor": self.devmajor, - "devminor": self.devminor - } - - if info["type"] == DIRTYPE and not info["name"].endswith("/"): - info["name"] += "/" - - return info - - def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"): - """Return a tar header as a string of 512 byte blocks. - """ - info = self.get_info() - - if format == USTAR_FORMAT: - return self.create_ustar_header(info, encoding, errors) - elif format == GNU_FORMAT: - return self.create_gnu_header(info, encoding, errors) - elif format == PAX_FORMAT: - return self.create_pax_header(info, encoding) - else: - raise ValueError("invalid format") - - def create_ustar_header(self, info, encoding, errors): - """Return the object as a ustar header block. - """ - info["magic"] = POSIX_MAGIC - - if len(info["linkname"]) > LENGTH_LINK: - raise ValueError("linkname is too long") - - if len(info["name"]) > LENGTH_NAME: - info["prefix"], info["name"] = self._posix_split_name(info["name"]) - - return self._create_header(info, USTAR_FORMAT, encoding, errors) - - def create_gnu_header(self, info, encoding, errors): - """Return the object as a GNU header block sequence. 
- """ - info["magic"] = GNU_MAGIC - - buf = b"" - if len(info["linkname"]) > LENGTH_LINK: - buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors) - - if len(info["name"]) > LENGTH_NAME: - buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors) - - return buf + self._create_header(info, GNU_FORMAT, encoding, errors) - - def create_pax_header(self, info, encoding): - """Return the object as a ustar header block. If it cannot be - represented this way, prepend a pax extended header sequence - with supplement information. - """ - info["magic"] = POSIX_MAGIC - pax_headers = self.pax_headers.copy() - - # Test string fields for values that exceed the field length or cannot - # be represented in ASCII encoding. - for name, hname, length in ( - ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK), - ("uname", "uname", 32), ("gname", "gname", 32)): - - if hname in pax_headers: - # The pax header has priority. - continue - - # Try to encode the string as ASCII. - try: - info[name].encode("ascii", "strict") - except UnicodeEncodeError: - pax_headers[hname] = info[name] - continue - - if len(info[name]) > length: - pax_headers[hname] = info[name] - - # Test number fields for values that exceed the field limit or values - # that like to be stored as float. - for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)): - if name in pax_headers: - # The pax header has priority. Avoid overflow. - info[name] = 0 - continue - - val = info[name] - if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float): - pax_headers[name] = str(val) - info[name] = 0 - - # Create a pax extended header if necessary. - if pax_headers: - buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding) - else: - buf = b"" - - return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace") - - @classmethod - def create_pax_global_header(cls, pax_headers): - """Return the object as a pax global header block sequence. - """ - return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8") - - def _posix_split_name(self, name): - """Split a name longer than 100 chars into a prefix - and a name part. - """ - prefix = name[:LENGTH_PREFIX + 1] - while prefix and prefix[-1] != "/": - prefix = prefix[:-1] - - name = name[len(prefix):] - prefix = prefix[:-1] - - if not prefix or len(name) > LENGTH_NAME: - raise ValueError("name is too long") - return prefix, name - - @staticmethod - def _create_header(info, format, encoding, errors): - """Return a header block. info is a dictionary with file - information, format must be one of the *_FORMAT constants. 
- """ - parts = [ - stn(info.get("name", ""), 100, encoding, errors), - itn(info.get("mode", 0) & 0o7777, 8, format), - itn(info.get("uid", 0), 8, format), - itn(info.get("gid", 0), 8, format), - itn(info.get("size", 0), 12, format), - itn(info.get("mtime", 0), 12, format), - b" ", # checksum field - info.get("type", REGTYPE), - stn(info.get("linkname", ""), 100, encoding, errors), - info.get("magic", POSIX_MAGIC), - stn(info.get("uname", ""), 32, encoding, errors), - stn(info.get("gname", ""), 32, encoding, errors), - itn(info.get("devmajor", 0), 8, format), - itn(info.get("devminor", 0), 8, format), - stn(info.get("prefix", ""), 155, encoding, errors) - ] - - buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts)) - chksum = calc_chksums(buf[-BLOCKSIZE:])[0] - buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:] - return buf - - @staticmethod - def _create_payload(payload): - """Return the string payload filled with zero bytes - up to the next 512 byte border. - """ - blocks, remainder = divmod(len(payload), BLOCKSIZE) - if remainder > 0: - payload += (BLOCKSIZE - remainder) * NUL - return payload - - @classmethod - def _create_gnu_long_header(cls, name, type, encoding, errors): - """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence - for name. - """ - name = name.encode(encoding, errors) + NUL - - info = {} - info["name"] = "././@LongLink" - info["type"] = type - info["size"] = len(name) - info["magic"] = GNU_MAGIC - - # create extended header + name blocks. - return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \ - cls._create_payload(name) - - @classmethod - def _create_pax_generic_header(cls, pax_headers, type, encoding): - """Return a POSIX.1-2008 extended or global header sequence - that contains a list of keyword, value pairs. The values - must be strings. - """ - # Check if one of the fields contains surrogate characters and thereby - # forces hdrcharset=BINARY, see _proc_pax() for more information. - binary = False - for keyword, value in pax_headers.items(): - try: - value.encode("utf8", "strict") - except UnicodeEncodeError: - binary = True - break - - records = b"" - if binary: - # Put the hdrcharset field at the beginning of the header. - records += b"21 hdrcharset=BINARY\n" - - for keyword, value in pax_headers.items(): - keyword = keyword.encode("utf8") - if binary: - # Try to restore the original byte representation of `value'. - # Needless to say, that the encoding must match the string. - value = value.encode(encoding, "surrogateescape") - else: - value = value.encode("utf8") - - l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n' - n = p = 0 - while True: - n = l + len(str(p)) - if n == p: - break - p = n - records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n" - - # We use a hardcoded "././@PaxHeader" name like star does - # instead of the one that POSIX recommends. - info = {} - info["name"] = "././@PaxHeader" - info["type"] = type - info["size"] = len(records) - info["magic"] = POSIX_MAGIC - - # Create pax header + record blocks. - return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \ - cls._create_payload(records) - - @classmethod - def frombuf(cls, buf, encoding, errors): - """Construct a TarInfo object from a 512 byte bytes object. 
- """ - if len(buf) == 0: - raise EmptyHeaderError("empty header") - if len(buf) != BLOCKSIZE: - raise TruncatedHeaderError("truncated header") - if buf.count(NUL) == BLOCKSIZE: - raise EOFHeaderError("end of file header") - - chksum = nti(buf[148:156]) - if chksum not in calc_chksums(buf): - raise InvalidHeaderError("bad checksum") - - obj = cls() - obj.name = nts(buf[0:100], encoding, errors) - obj.mode = nti(buf[100:108]) - obj.uid = nti(buf[108:116]) - obj.gid = nti(buf[116:124]) - obj.size = nti(buf[124:136]) - obj.mtime = nti(buf[136:148]) - obj.chksum = chksum - obj.type = buf[156:157] - obj.linkname = nts(buf[157:257], encoding, errors) - obj.uname = nts(buf[265:297], encoding, errors) - obj.gname = nts(buf[297:329], encoding, errors) - obj.devmajor = nti(buf[329:337]) - obj.devminor = nti(buf[337:345]) - prefix = nts(buf[345:500], encoding, errors) - - # Old V7 tar format represents a directory as a regular - # file with a trailing slash. - if obj.type == AREGTYPE and obj.name.endswith("/"): - obj.type = DIRTYPE - - # The old GNU sparse format occupies some of the unused - # space in the buffer for up to 4 sparse structures. - # Save the them for later processing in _proc_sparse(). - if obj.type == GNUTYPE_SPARSE: - pos = 386 - structs = [] - for i in range(4): - try: - offset = nti(buf[pos:pos + 12]) - numbytes = nti(buf[pos + 12:pos + 24]) - except ValueError: - break - structs.append((offset, numbytes)) - pos += 24 - isextended = bool(buf[482]) - origsize = nti(buf[483:495]) - obj._sparse_structs = (structs, isextended, origsize) - - # Remove redundant slashes from directories. - if obj.isdir(): - obj.name = obj.name.rstrip("/") - - # Reconstruct a ustar longname. - if prefix and obj.type not in GNU_TYPES: - obj.name = prefix + "/" + obj.name - return obj - - @classmethod - def fromtarfile(cls, tarfile): - """Return the next TarInfo object from TarFile object - tarfile. - """ - buf = tarfile.fileobj.read(BLOCKSIZE) - obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors) - obj.offset = tarfile.fileobj.tell() - BLOCKSIZE - return obj._proc_member(tarfile) - - #-------------------------------------------------------------------------- - # The following are methods that are called depending on the type of a - # member. The entry point is _proc_member() which can be overridden in a - # subclass to add custom _proc_*() methods. A _proc_*() method MUST - # implement the following - # operations: - # 1. Set self.offset_data to the position where the data blocks begin, - # if there is data that follows. - # 2. Set tarfile.offset to the position where the next member's header will - # begin. - # 3. Return self or another valid TarInfo object. - def _proc_member(self, tarfile): - """Choose the right processing method depending on - the type and call it. - """ - if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK): - return self._proc_gnulong(tarfile) - elif self.type == GNUTYPE_SPARSE: - return self._proc_sparse(tarfile) - elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE): - return self._proc_pax(tarfile) - else: - return self._proc_builtin(tarfile) - - def _proc_builtin(self, tarfile): - """Process a builtin type or an unknown type which - will be treated as a regular file. - """ - self.offset_data = tarfile.fileobj.tell() - offset = self.offset_data - if self.isreg() or self.type not in SUPPORTED_TYPES: - # Skip the following data blocks. - offset += self._block(self.size) - tarfile.offset = offset - - # Patch the TarInfo object with saved global - # header information. 
- self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors) - - return self - - def _proc_gnulong(self, tarfile): - """Process the blocks that hold a GNU longname - or longlink member. - """ - buf = tarfile.fileobj.read(self._block(self.size)) - - # Fetch the next header and process it. - try: - next = self.fromtarfile(tarfile) - except HeaderError: - raise SubsequentHeaderError("missing or bad subsequent header") - - # Patch the TarInfo object from the next header with - # the longname information. - next.offset = self.offset - if self.type == GNUTYPE_LONGNAME: - next.name = nts(buf, tarfile.encoding, tarfile.errors) - elif self.type == GNUTYPE_LONGLINK: - next.linkname = nts(buf, tarfile.encoding, tarfile.errors) - - return next - - def _proc_sparse(self, tarfile): - """Process a GNU sparse header plus extra headers. - """ - # We already collected some sparse structures in frombuf(). - structs, isextended, origsize = self._sparse_structs - del self._sparse_structs - - # Collect sparse structures from extended header blocks. - while isextended: - buf = tarfile.fileobj.read(BLOCKSIZE) - pos = 0 - for i in range(21): - try: - offset = nti(buf[pos:pos + 12]) - numbytes = nti(buf[pos + 12:pos + 24]) - except ValueError: - break - if offset and numbytes: - structs.append((offset, numbytes)) - pos += 24 - isextended = bool(buf[504]) - self.sparse = structs - - self.offset_data = tarfile.fileobj.tell() - tarfile.offset = self.offset_data + self._block(self.size) - self.size = origsize - return self - - def _proc_pax(self, tarfile): - """Process an extended or global header as described in - POSIX.1-2008. - """ - # Read the header information. - buf = tarfile.fileobj.read(self._block(self.size)) - - # A pax header stores supplemental information for either - # the following file (extended) or all following files - # (global). - if self.type == XGLTYPE: - pax_headers = tarfile.pax_headers - else: - pax_headers = tarfile.pax_headers.copy() - - # Check if the pax header contains a hdrcharset field. This tells us - # the encoding of the path, linkpath, uname and gname fields. Normally, - # these fields are UTF-8 encoded but since POSIX.1-2008 tar - # implementations are allowed to store them as raw binary strings if - # the translation to UTF-8 fails. - match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf) - if match is not None: - pax_headers["hdrcharset"] = match.group(1).decode("utf8") - - # For the time being, we don't care about anything other than "BINARY". - # The only other value that is currently allowed by the standard is - # "ISO-IR 10646 2000 UTF-8" in other words UTF-8. - hdrcharset = pax_headers.get("hdrcharset") - if hdrcharset == "BINARY": - encoding = tarfile.encoding - else: - encoding = "utf8" - - # Parse pax header information. A record looks like that: - # "%d %s=%s\n" % (length, keyword, value). length is the size - # of the complete record including the length field itself and - # the newline. keyword and value are both UTF-8 encoded strings. - regex = re.compile(br"(\d+) ([^=]+)=") - pos = 0 - while True: - match = regex.match(buf, pos) - if not match: - break - - length, keyword = match.groups() - length = int(length) - value = buf[match.end(2) + 1:match.start(1) + length - 1] - - # Normally, we could just use "utf8" as the encoding and "strict" - # as the error handler, but we better not take the risk. 
For - # example, GNU tar <= 1.23 is known to store filenames it cannot - # translate to UTF-8 as raw strings (unfortunately without a - # hdrcharset=BINARY header). - # We first try the strict standard encoding, and if that fails we - # fall back on the user's encoding and error handler. - keyword = self._decode_pax_field(keyword, "utf8", "utf8", - tarfile.errors) - if keyword in PAX_NAME_FIELDS: - value = self._decode_pax_field(value, encoding, tarfile.encoding, - tarfile.errors) - else: - value = self._decode_pax_field(value, "utf8", "utf8", - tarfile.errors) - - pax_headers[keyword] = value - pos += length - - # Fetch the next header. - try: - next = self.fromtarfile(tarfile) - except HeaderError: - raise SubsequentHeaderError("missing or bad subsequent header") - - # Process GNU sparse information. - if "GNU.sparse.map" in pax_headers: - # GNU extended sparse format version 0.1. - self._proc_gnusparse_01(next, pax_headers) - - elif "GNU.sparse.size" in pax_headers: - # GNU extended sparse format version 0.0. - self._proc_gnusparse_00(next, pax_headers, buf) - - elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0": - # GNU extended sparse format version 1.0. - self._proc_gnusparse_10(next, pax_headers, tarfile) - - if self.type in (XHDTYPE, SOLARIS_XHDTYPE): - # Patch the TarInfo object with the extended header info. - next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors) - next.offset = self.offset - - if "size" in pax_headers: - # If the extended header replaces the size field, - # we need to recalculate the offset where the next - # header starts. - offset = next.offset_data - if next.isreg() or next.type not in SUPPORTED_TYPES: - offset += next._block(next.size) - tarfile.offset = offset - - return next - - def _proc_gnusparse_00(self, next, pax_headers, buf): - """Process a GNU tar extended sparse header, version 0.0. - """ - offsets = [] - for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf): - offsets.append(int(match.group(1))) - numbytes = [] - for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf): - numbytes.append(int(match.group(1))) - next.sparse = list(zip(offsets, numbytes)) - - def _proc_gnusparse_01(self, next, pax_headers): - """Process a GNU tar extended sparse header, version 0.1. - """ - sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")] - next.sparse = list(zip(sparse[::2], sparse[1::2])) - - def _proc_gnusparse_10(self, next, pax_headers, tarfile): - """Process a GNU tar extended sparse header, version 1.0. - """ - fields = None - sparse = [] - buf = tarfile.fileobj.read(BLOCKSIZE) - fields, buf = buf.split(b"\n", 1) - fields = int(fields) - while len(sparse) < fields * 2: - if b"\n" not in buf: - buf += tarfile.fileobj.read(BLOCKSIZE) - number, buf = buf.split(b"\n", 1) - sparse.append(int(number)) - next.offset_data = tarfile.fileobj.tell() - next.sparse = list(zip(sparse[::2], sparse[1::2])) - - def _apply_pax_info(self, pax_headers, encoding, errors): - """Replace fields with supplemental information from a previous - pax extended or global header. 
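Parsing reverses that: read the decimal length, slice out keyword and value, then jump ahead by length; a minimal sketch over an assumed two-record buffer:

```python
import re

buf = b"30 path=some/long/member/name\n19 size=1234567890\n"
regex = re.compile(br"(\d+) ([^=]+)=")

pos, headers = 0, {}
while True:
    m = regex.match(buf, pos)
    if not m:
        break
    length = int(m.group(1))
    keyword = m.group(2).decode("utf8")
    value = buf[m.end(2) + 1:pos + length - 1].decode("utf8")
    headers[keyword] = value
    pos += length

print(headers)  # {'path': 'some/long/member/name', 'size': '1234567890'}
```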
- """ - for keyword, value in pax_headers.items(): - if keyword == "GNU.sparse.name": - setattr(self, "path", value) - elif keyword == "GNU.sparse.size": - setattr(self, "size", int(value)) - elif keyword == "GNU.sparse.realsize": - setattr(self, "size", int(value)) - elif keyword in PAX_FIELDS: - if keyword in PAX_NUMBER_FIELDS: - try: - value = PAX_NUMBER_FIELDS[keyword](value) - except ValueError: - value = 0 - if keyword == "path": - value = value.rstrip("/") - setattr(self, keyword, value) - - self.pax_headers = pax_headers.copy() - - def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors): - """Decode a single field from a pax record. - """ - try: - return value.decode(encoding, "strict") - except UnicodeDecodeError: - return value.decode(fallback_encoding, fallback_errors) - - def _block(self, count): - """Round up a byte count by BLOCKSIZE and return it, - e.g. _block(834) => 1024. - """ - blocks, remainder = divmod(count, BLOCKSIZE) - if remainder: - blocks += 1 - return blocks * BLOCKSIZE - - def isreg(self): - return self.type in REGULAR_TYPES - def isfile(self): - return self.isreg() - def isdir(self): - return self.type == DIRTYPE - def issym(self): - return self.type == SYMTYPE - def islnk(self): - return self.type == LNKTYPE - def ischr(self): - return self.type == CHRTYPE - def isblk(self): - return self.type == BLKTYPE - def isfifo(self): - return self.type == FIFOTYPE - def issparse(self): - return self.sparse is not None - def isdev(self): - return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE) -# class TarInfo - -class TarFile(object): - """The TarFile Class provides an interface to tar archives. - """ - - debug = 0 # May be set from 0 (no msgs) to 3 (all msgs) - - dereference = False # If true, add content of linked file to the - # tar file, else the link. - - ignore_zeros = False # If true, skips empty or invalid blocks and - # continues processing. - - errorlevel = 1 # If 0, fatal errors only appear in debug - # messages (if debug >= 0). If > 0, errors - # are passed to the caller as exceptions. - - format = DEFAULT_FORMAT # The format to use when creating an archive. - - encoding = ENCODING # Encoding for 8-bit character strings. - - errors = None # Error handler for unicode conversion. - - tarinfo = TarInfo # The default TarInfo class to use. - - fileobject = ExFileObject # The default ExFileObject class to use. - - def __init__(self, name=None, mode="r", fileobj=None, format=None, - tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, - errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None): - """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to - read from an existing archive, 'a' to append data to an existing - file or 'w' to create a new file overwriting an existing one. `mode' - defaults to 'r'. - If `fileobj' is given, it is used for reading or writing data. If it - can be determined, `mode' is overridden by `fileobj's mode. - `fileobj' is not closed, when TarFile is closed. - """ - if len(mode) > 1 or mode not in "raw": - raise ValueError("mode must be 'r', 'a' or 'w'") - self.mode = mode - self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode] - - if not fileobj: - if self.mode == "a" and not os.path.exists(name): - # Create nonexistent files in append mode. 
- self.mode = "w" - self._mode = "wb" - fileobj = bltn_open(name, self._mode) - self._extfileobj = False - else: - if name is None and hasattr(fileobj, "name"): - name = fileobj.name - if hasattr(fileobj, "mode"): - self._mode = fileobj.mode - self._extfileobj = True - self.name = os.path.abspath(name) if name else None - self.fileobj = fileobj - - # Init attributes. - if format is not None: - self.format = format - if tarinfo is not None: - self.tarinfo = tarinfo - if dereference is not None: - self.dereference = dereference - if ignore_zeros is not None: - self.ignore_zeros = ignore_zeros - if encoding is not None: - self.encoding = encoding - self.errors = errors - - if pax_headers is not None and self.format == PAX_FORMAT: - self.pax_headers = pax_headers - else: - self.pax_headers = {} - - if debug is not None: - self.debug = debug - if errorlevel is not None: - self.errorlevel = errorlevel - - # Init datastructures. - self.closed = False - self.members = [] # list of members as TarInfo objects - self._loaded = False # flag if all members have been read - self.offset = self.fileobj.tell() - # current position in the archive file - self.inodes = {} # dictionary caching the inodes of - # archive members already added - - try: - if self.mode == "r": - self.firstmember = None - self.firstmember = self.next() - - if self.mode == "a": - # Move to the end of the archive, - # before the first empty block. - while True: - self.fileobj.seek(self.offset) - try: - tarinfo = self.tarinfo.fromtarfile(self) - self.members.append(tarinfo) - except EOFHeaderError: - self.fileobj.seek(self.offset) - break - except HeaderError as e: - raise ReadError(str(e)) - - if self.mode in "aw": - self._loaded = True - - if self.pax_headers: - buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy()) - self.fileobj.write(buf) - self.offset += len(buf) - except: - if not self._extfileobj: - self.fileobj.close() - self.closed = True - raise - - #-------------------------------------------------------------------------- - # Below are the classmethods which act as alternate constructors to the - # TarFile class. The open() method is the only one that is needed for - # public use; it is the "super"-constructor and is able to select an - # adequate "sub"-constructor for a particular compression using the mapping - # from OPEN_METH. - # - # This concept allows one to subclass TarFile without losing the comfort of - # the super-constructor. A sub-constructor is registered and made available - # by adding it to the mapping in OPEN_METH. - - @classmethod - def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs): - """Open a tar archive for reading, writing or appending. Return - an appropriate TarFile class. 
- - mode: - 'r' or 'r:*' open for reading with transparent compression - 'r:' open for reading exclusively uncompressed - 'r:gz' open for reading with gzip compression - 'r:bz2' open for reading with bzip2 compression - 'a' or 'a:' open for appending, creating the file if necessary - 'w' or 'w:' open for writing without compression - 'w:gz' open for writing with gzip compression - 'w:bz2' open for writing with bzip2 compression - - 'r|*' open a stream of tar blocks with transparent compression - 'r|' open an uncompressed stream of tar blocks for reading - 'r|gz' open a gzip compressed stream of tar blocks - 'r|bz2' open a bzip2 compressed stream of tar blocks - 'w|' open an uncompressed stream for writing - 'w|gz' open a gzip compressed stream for writing - 'w|bz2' open a bzip2 compressed stream for writing - """ - - if not name and not fileobj: - raise ValueError("nothing to open") - - if mode in ("r", "r:*"): - # Find out which *open() is appropriate for opening the file. - for comptype in cls.OPEN_METH: - func = getattr(cls, cls.OPEN_METH[comptype]) - if fileobj is not None: - saved_pos = fileobj.tell() - try: - return func(name, "r", fileobj, **kwargs) - except (ReadError, CompressionError) as e: - if fileobj is not None: - fileobj.seek(saved_pos) - continue - raise ReadError("file could not be opened successfully") - - elif ":" in mode: - filemode, comptype = mode.split(":", 1) - filemode = filemode or "r" - comptype = comptype or "tar" - - # Select the *open() function according to - # given compression. - if comptype in cls.OPEN_METH: - func = getattr(cls, cls.OPEN_METH[comptype]) - else: - raise CompressionError("unknown compression type %r" % comptype) - return func(name, filemode, fileobj, **kwargs) - - elif "|" in mode: - filemode, comptype = mode.split("|", 1) - filemode = filemode or "r" - comptype = comptype or "tar" - - if filemode not in "rw": - raise ValueError("mode must be 'r' or 'w'") - - stream = _Stream(name, filemode, comptype, fileobj, bufsize) - try: - t = cls(name, filemode, stream, **kwargs) - except: - stream.close() - raise - t._extfileobj = False - return t - - elif mode in "aw": - return cls.taropen(name, mode, fileobj, **kwargs) - - raise ValueError("undiscernible mode") - - @classmethod - def taropen(cls, name, mode="r", fileobj=None, **kwargs): - """Open uncompressed tar archive name for reading or writing. - """ - if len(mode) > 1 or mode not in "raw": - raise ValueError("mode must be 'r', 'a' or 'w'") - return cls(name, mode, fileobj, **kwargs) - - @classmethod - def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): - """Open gzip compressed tar archive name for reading or writing. - Appending is not allowed. - """ - if len(mode) > 1 or mode not in "rw": - raise ValueError("mode must be 'r' or 'w'") - - try: - import gzip - gzip.GzipFile - except (ImportError, AttributeError): - raise CompressionError("gzip module is not available") - - extfileobj = fileobj is not None - try: - fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj) - t = cls.taropen(name, mode, fileobj, **kwargs) - except IOError: - if not extfileobj and fileobj is not None: - fileobj.close() - if fileobj is None: - raise - raise ReadError("not a gzip file") - except: - if not extfileobj and fileobj is not None: - fileobj.close() - raise - t._extfileobj = extfileobj - return t - - @classmethod - def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs): - """Open bzip2 compressed tar archive name for reading or writing. 
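In use, the mode string selects a sub-constructor through OPEN_METH ('r:gz' -> gzopen) or the blockwise `_Stream` path ('r|*'); the standard library's tarfile exposes the same modes (the archive name here is hypothetical):

```python
import tarfile

# Random access with explicit gzip compression:
with tarfile.open("example.tar.gz", "r:gz") as tf:
    print(tf.getnames())

# Forward-only stream with transparent compression detection:
with tarfile.open("example.tar.gz", "r|*") as tf:
    for member in tf:
        print(member.name, member.size)
```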
- Appending is not allowed. - """ - if len(mode) > 1 or mode not in "rw": - raise ValueError("mode must be 'r' or 'w'.") - - try: - import bz2 - except ImportError: - raise CompressionError("bz2 module is not available") - - if fileobj is not None: - fileobj = _BZ2Proxy(fileobj, mode) - else: - fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel) - - try: - t = cls.taropen(name, mode, fileobj, **kwargs) - except (IOError, EOFError): - fileobj.close() - raise ReadError("not a bzip2 file") - t._extfileobj = False - return t - - # All *open() methods are registered here. - OPEN_METH = { - "tar": "taropen", # uncompressed tar - "gz": "gzopen", # gzip compressed tar - "bz2": "bz2open" # bzip2 compressed tar - } - - #-------------------------------------------------------------------------- - # The public methods which TarFile provides: - - def close(self): - """Close the TarFile. In write-mode, two finishing zero blocks are - appended to the archive. - """ - if self.closed: - return - - if self.mode in "aw": - self.fileobj.write(NUL * (BLOCKSIZE * 2)) - self.offset += (BLOCKSIZE * 2) - # fill up the end with zero-blocks - # (like option -b20 for tar does) - blocks, remainder = divmod(self.offset, RECORDSIZE) - if remainder > 0: - self.fileobj.write(NUL * (RECORDSIZE - remainder)) - - if not self._extfileobj: - self.fileobj.close() - self.closed = True - - def getmember(self, name): - """Return a TarInfo object for member `name'. If `name' can not be - found in the archive, KeyError is raised. If a member occurs more - than once in the archive, its last occurrence is assumed to be the - most up-to-date version. - """ - tarinfo = self._getmember(name) - if tarinfo is None: - raise KeyError("filename %r not found" % name) - return tarinfo - - def getmembers(self): - """Return the members of the archive as a list of TarInfo objects. The - list has the same order as the members in the archive. - """ - self._check() - if not self._loaded: # if we want to obtain a list of - self._load() # all members, we first have to - # scan the whole archive. - return self.members - - def getnames(self): - """Return the members of the archive as a list of their names. It has - the same order as the list returned by getmembers(). - """ - return [tarinfo.name for tarinfo in self.getmembers()] - - def gettarinfo(self, name=None, arcname=None, fileobj=None): - """Create a TarInfo object for either the file `name' or the file - object `fileobj' (using os.fstat on its file descriptor). You can - modify some of the TarInfo's attributes before you add it using - addfile(). If given, `arcname' specifies an alternative name for the - file in the archive. - """ - self._check("aw") - - # When fileobj is given, replace name by - # fileobj's real name. - if fileobj is not None: - name = fileobj.name - - # Building the name of the member in the archive. - # Backward slashes are converted to forward slashes, - # Absolute paths are turned to relative paths. - if arcname is None: - arcname = name - drv, arcname = os.path.splitdrive(arcname) - arcname = arcname.replace(os.sep, "/") - arcname = arcname.lstrip("/") - - # Now, fill the TarInfo object with - # information specific for the file. - tarinfo = self.tarinfo() - tarinfo.tarfile = self - - # Use os.stat or os.lstat, depending on platform - # and if symlinks shall be resolved. 
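The arcname normalization in `gettarinfo()` amounts to dropping the drive, switching to forward slashes, and making the path relative; the same three steps in isolation:

```python
import os

def to_arcname(name):
    # Same normalization as gettarinfo() above.
    drv, arcname = os.path.splitdrive(name)
    arcname = arcname.replace(os.sep, "/")
    return arcname.lstrip("/")

print(to_arcname("/tmp/file.txt"))  # 'tmp/file.txt'
# On Windows, r'C:\projects\demo\file.txt' -> 'projects/demo/file.txt'
```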
- if fileobj is None: - if hasattr(os, "lstat") and not self.dereference: - statres = os.lstat(name) - else: - statres = os.stat(name) - else: - statres = os.fstat(fileobj.fileno()) - linkname = "" - - stmd = statres.st_mode - if stat.S_ISREG(stmd): - inode = (statres.st_ino, statres.st_dev) - if not self.dereference and statres.st_nlink > 1 and \ - inode in self.inodes and arcname != self.inodes[inode]: - # Is it a hardlink to an already - # archived file? - type = LNKTYPE - linkname = self.inodes[inode] - else: - # The inode is added only if its valid. - # For win32 it is always 0. - type = REGTYPE - if inode[0]: - self.inodes[inode] = arcname - elif stat.S_ISDIR(stmd): - type = DIRTYPE - elif stat.S_ISFIFO(stmd): - type = FIFOTYPE - elif stat.S_ISLNK(stmd): - type = SYMTYPE - linkname = os.readlink(name) - elif stat.S_ISCHR(stmd): - type = CHRTYPE - elif stat.S_ISBLK(stmd): - type = BLKTYPE - else: - return None - - # Fill the TarInfo object with all - # information we can get. - tarinfo.name = arcname - tarinfo.mode = stmd - tarinfo.uid = statres.st_uid - tarinfo.gid = statres.st_gid - if type == REGTYPE: - tarinfo.size = statres.st_size - else: - tarinfo.size = 0 - tarinfo.mtime = statres.st_mtime - tarinfo.type = type - tarinfo.linkname = linkname - if pwd: - try: - tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0] - except KeyError: - pass - if grp: - try: - tarinfo.gname = grp.getgrgid(tarinfo.gid)[0] - except KeyError: - pass - - if type in (CHRTYPE, BLKTYPE): - if hasattr(os, "major") and hasattr(os, "minor"): - tarinfo.devmajor = os.major(statres.st_rdev) - tarinfo.devminor = os.minor(statres.st_rdev) - return tarinfo - - def list(self, verbose=True): - """Print a table of contents to sys.stdout. If `verbose' is False, only - the names of the members are printed. If it is True, an `ls -l'-like - output is produced. - """ - self._check() - - for tarinfo in self: - if verbose: - print(filemode(tarinfo.mode), end=' ') - print("%s/%s" % (tarinfo.uname or tarinfo.uid, - tarinfo.gname or tarinfo.gid), end=' ') - if tarinfo.ischr() or tarinfo.isblk(): - print("%10s" % ("%d,%d" \ - % (tarinfo.devmajor, tarinfo.devminor)), end=' ') - else: - print("%10d" % tarinfo.size, end=' ') - print("%d-%02d-%02d %02d:%02d:%02d" \ - % time.localtime(tarinfo.mtime)[:6], end=' ') - - print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ') - - if verbose: - if tarinfo.issym(): - print("->", tarinfo.linkname, end=' ') - if tarinfo.islnk(): - print("link to", tarinfo.linkname, end=' ') - print() - - def add(self, name, arcname=None, recursive=True, exclude=None, filter=None): - """Add the file `name' to the archive. `name' may be any type of file - (directory, fifo, symbolic link, etc.). If given, `arcname' - specifies an alternative name for the file in the archive. - Directories are added recursively by default. This can be avoided by - setting `recursive' to False. `exclude' is a function that should - return True for each filename to be excluded. `filter' is a function - that expects a TarInfo object argument and returns the changed - TarInfo object, if it returns None the TarInfo object will be - excluded from the archive. - """ - self._check("aw") - - if arcname is None: - arcname = name - - # Exclude pathnames. - if exclude is not None: - import warnings - warnings.warn("use the filter argument instead", - DeprecationWarning, 2) - if exclude(name): - self._dbg(2, "tarfile: Excluded %r" % name) - return - - # Skip if somebody tries to archive the archive... 
- if self.name is not None and os.path.abspath(name) == self.name: - self._dbg(2, "tarfile: Skipped %r" % name) - return - - self._dbg(1, name) - - # Create a TarInfo object from the file. - tarinfo = self.gettarinfo(name, arcname) - - if tarinfo is None: - self._dbg(1, "tarfile: Unsupported type %r" % name) - return - - # Change or exclude the TarInfo object. - if filter is not None: - tarinfo = filter(tarinfo) - if tarinfo is None: - self._dbg(2, "tarfile: Excluded %r" % name) - return - - # Append the tar header and data to the archive. - if tarinfo.isreg(): - f = bltn_open(name, "rb") - self.addfile(tarinfo, f) - f.close() - - elif tarinfo.isdir(): - self.addfile(tarinfo) - if recursive: - for f in os.listdir(name): - self.add(os.path.join(name, f), os.path.join(arcname, f), - recursive, exclude, filter=filter) - - else: - self.addfile(tarinfo) - - def addfile(self, tarinfo, fileobj=None): - """Add the TarInfo object `tarinfo' to the archive. If `fileobj' is - given, tarinfo.size bytes are read from it and added to the archive. - You can create TarInfo objects using gettarinfo(). - On Windows platforms, `fileobj' should always be opened with mode - 'rb' to avoid irritation about the file size. - """ - self._check("aw") - - tarinfo = copy.copy(tarinfo) - - buf = tarinfo.tobuf(self.format, self.encoding, self.errors) - self.fileobj.write(buf) - self.offset += len(buf) - - # If there's data to follow, append it. - if fileobj is not None: - copyfileobj(fileobj, self.fileobj, tarinfo.size) - blocks, remainder = divmod(tarinfo.size, BLOCKSIZE) - if remainder > 0: - self.fileobj.write(NUL * (BLOCKSIZE - remainder)) - blocks += 1 - self.offset += blocks * BLOCKSIZE - - self.members.append(tarinfo) - - def extractall(self, path=".", members=None): - """Extract all members from the archive to the current working - directory and set owner, modification time and permissions on - directories afterwards. `path' specifies a different directory - to extract to. `members' is optional and must be a subset of the - list returned by getmembers(). - """ - directories = [] - - if members is None: - members = self - - for tarinfo in members: - if tarinfo.isdir(): - # Extract directories with a safe mode. - directories.append(tarinfo) - tarinfo = copy.copy(tarinfo) - tarinfo.mode = 0o700 - # Do not set_attrs directories, as we will do that further down - self.extract(tarinfo, path, set_attrs=not tarinfo.isdir()) - - # Reverse sort directories. - directories.sort(key=lambda a: a.name) - directories.reverse() - - # Set correct owner, mtime and filemode on directories. - for tarinfo in directories: - dirpath = os.path.join(path, tarinfo.name) - try: - self.chown(tarinfo, dirpath) - self.utime(tarinfo, dirpath) - self.chmod(tarinfo, dirpath) - except ExtractError as e: - if self.errorlevel > 1: - raise - else: - self._dbg(1, "tarfile: %s" % e) - - def extract(self, member, path="", set_attrs=True): - """Extract a member from the archive to the current working directory, - using its full name. Its file information is extracted as accurately - as possible. `member' may be a filename or a TarInfo object. You can - specify a different directory using `path'. File attributes (owner, - mtime, mode) are set unless `set_attrs' is False. - """ - self._check("r") - - if isinstance(member, str): - tarinfo = self.getmember(member) - else: - tarinfo = member - - # Prepare the link target for makelink(). 
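The padding arithmetic in `addfile()` keeps every member aligned to 512-byte blocks, which is also what the `_block(834) => 1024` example in the TarInfo helper refers to; in isolation:

```python
BLOCKSIZE = 512

def padded_length(size):
    # Mirrors the divmod in addfile(): NUL-pad the last partial block so
    # the next header starts on a block boundary.
    blocks, remainder = divmod(size, BLOCKSIZE)
    padding = (BLOCKSIZE - remainder) if remainder else 0
    if remainder:
        blocks += 1
    return blocks * BLOCKSIZE, padding

print(padded_length(834))   # (1024, 190)
print(padded_length(1024))  # (1024, 0)
```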
- if tarinfo.islnk(): - tarinfo._link_target = os.path.join(path, tarinfo.linkname) - - try: - self._extract_member(tarinfo, os.path.join(path, tarinfo.name), - set_attrs=set_attrs) - except EnvironmentError as e: - if self.errorlevel > 0: - raise - else: - if e.filename is None: - self._dbg(1, "tarfile: %s" % e.strerror) - else: - self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename)) - except ExtractError as e: - if self.errorlevel > 1: - raise - else: - self._dbg(1, "tarfile: %s" % e) - - def extractfile(self, member): - """Extract a member from the archive as a file object. `member' may be - a filename or a TarInfo object. If `member' is a regular file, a - file-like object is returned. If `member' is a link, a file-like - object is constructed from the link's target. If `member' is none of - the above, None is returned. - The file-like object is read-only and provides the following - methods: read(), readline(), readlines(), seek() and tell() - """ - self._check("r") - - if isinstance(member, str): - tarinfo = self.getmember(member) - else: - tarinfo = member - - if tarinfo.isreg(): - return self.fileobject(self, tarinfo) - - elif tarinfo.type not in SUPPORTED_TYPES: - # If a member's type is unknown, it is treated as a - # regular file. - return self.fileobject(self, tarinfo) - - elif tarinfo.islnk() or tarinfo.issym(): - if isinstance(self.fileobj, _Stream): - # A small but ugly workaround for the case that someone tries - # to extract a (sym)link as a file-object from a non-seekable - # stream of tar blocks. - raise StreamError("cannot extract (sym)link as file object") - else: - # A (sym)link's file object is its target's file object. - return self.extractfile(self._find_link_target(tarinfo)) - else: - # If there's no data associated with the member (directory, chrdev, - # blkdev, etc.), return None instead of a file object. - return None - - def _extract_member(self, tarinfo, targetpath, set_attrs=True): - """Extract the TarInfo object tarinfo to a physical - file called targetpath. - """ - # Fetch the TarInfo object for the given name - # and build the destination pathname, replacing - # forward slashes to platform specific separators. - targetpath = targetpath.rstrip("/") - targetpath = targetpath.replace("/", os.sep) - - # Create all upper directories. - upperdirs = os.path.dirname(targetpath) - if upperdirs and not os.path.exists(upperdirs): - # Create directories that are not part of the archive with - # default permissions. - os.makedirs(upperdirs) - - if tarinfo.islnk() or tarinfo.issym(): - self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname)) - else: - self._dbg(1, tarinfo.name) - - if tarinfo.isreg(): - self.makefile(tarinfo, targetpath) - elif tarinfo.isdir(): - self.makedir(tarinfo, targetpath) - elif tarinfo.isfifo(): - self.makefifo(tarinfo, targetpath) - elif tarinfo.ischr() or tarinfo.isblk(): - self.makedev(tarinfo, targetpath) - elif tarinfo.islnk() or tarinfo.issym(): - self.makelink(tarinfo, targetpath) - elif tarinfo.type not in SUPPORTED_TYPES: - self.makeunknown(tarinfo, targetpath) - else: - self.makefile(tarinfo, targetpath) - - if set_attrs: - self.chown(tarinfo, targetpath) - if not tarinfo.issym(): - self.chmod(tarinfo, targetpath) - self.utime(tarinfo, targetpath) - - #-------------------------------------------------------------------------- - # Below are the different file methods. They are called via - # _extract_member() when extract() is called. They can be replaced in a - # subclass to implement other functionality. 
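The per-type file methods deleted below (`makedir`, `makefile`, `makelink`, and friends) have direct equivalents in the standard-library `tarfile` module that takes over once this vendored backport is removed. A minimal usage sketch, not part of the patch (the archive name and destination directory are placeholders):

```python
import tarfile

# Stdlib tarfile exposes the same high-level API as the deleted backport.
with tarfile.open("example.tar.gz", "r:gz") as tf:
    tf.list(verbose=False)                    # names only, like list() above
    # Extraction filters landed in Python 3.12 (and in later 3.8-3.11
    # maintenance releases); "data" refuses members with absolute paths,
    # links escaping the destination, or device nodes.
    tf.extractall(path="dest", filter="data")
```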
- - def makedir(self, tarinfo, targetpath): - """Make a directory called targetpath. - """ - try: - # Use a safe mode for the directory, the real mode is set - # later in _extract_member(). - os.mkdir(targetpath, 0o700) - except EnvironmentError as e: - if e.errno != errno.EEXIST: - raise - - def makefile(self, tarinfo, targetpath): - """Make a file called targetpath. - """ - source = self.fileobj - source.seek(tarinfo.offset_data) - target = bltn_open(targetpath, "wb") - if tarinfo.sparse is not None: - for offset, size in tarinfo.sparse: - target.seek(offset) - copyfileobj(source, target, size) - else: - copyfileobj(source, target, tarinfo.size) - target.seek(tarinfo.size) - target.truncate() - target.close() - - def makeunknown(self, tarinfo, targetpath): - """Make a file from a TarInfo object with an unknown type - at targetpath. - """ - self.makefile(tarinfo, targetpath) - self._dbg(1, "tarfile: Unknown file type %r, " \ - "extracted as regular file." % tarinfo.type) - - def makefifo(self, tarinfo, targetpath): - """Make a fifo called targetpath. - """ - if hasattr(os, "mkfifo"): - os.mkfifo(targetpath) - else: - raise ExtractError("fifo not supported by system") - - def makedev(self, tarinfo, targetpath): - """Make a character or block device called targetpath. - """ - if not hasattr(os, "mknod") or not hasattr(os, "makedev"): - raise ExtractError("special devices not supported by system") - - mode = tarinfo.mode - if tarinfo.isblk(): - mode |= stat.S_IFBLK - else: - mode |= stat.S_IFCHR - - os.mknod(targetpath, mode, - os.makedev(tarinfo.devmajor, tarinfo.devminor)) - - def makelink(self, tarinfo, targetpath): - """Make a (symbolic) link called targetpath. If it cannot be created - (platform limitation), we try to make a copy of the referenced file - instead of a link. - """ - try: - # For systems that support symbolic and hard links. - if tarinfo.issym(): - os.symlink(tarinfo.linkname, targetpath) - else: - # See extract(). - if os.path.exists(tarinfo._link_target): - os.link(tarinfo._link_target, targetpath) - else: - self._extract_member(self._find_link_target(tarinfo), - targetpath) - except symlink_exception: - if tarinfo.issym(): - linkpath = os.path.join(os.path.dirname(tarinfo.name), - tarinfo.linkname) - else: - linkpath = tarinfo.linkname - else: - try: - self._extract_member(self._find_link_target(tarinfo), - targetpath) - except KeyError: - raise ExtractError("unable to resolve link inside archive") - - def chown(self, tarinfo, targetpath): - """Set owner of targetpath according to tarinfo. - """ - if pwd and hasattr(os, "geteuid") and os.geteuid() == 0: - # We have to be root to do so. - try: - g = grp.getgrnam(tarinfo.gname)[2] - except KeyError: - g = tarinfo.gid - try: - u = pwd.getpwnam(tarinfo.uname)[2] - except KeyError: - u = tarinfo.uid - try: - if tarinfo.issym() and hasattr(os, "lchown"): - os.lchown(targetpath, u, g) - else: - if sys.platform != "os2emx": - os.chown(targetpath, u, g) - except EnvironmentError as e: - raise ExtractError("could not change owner") - - def chmod(self, tarinfo, targetpath): - """Set file permissions of targetpath according to tarinfo. - """ - if hasattr(os, 'chmod'): - try: - os.chmod(targetpath, tarinfo.mode) - except EnvironmentError as e: - raise ExtractError("could not change mode") - - def utime(self, tarinfo, targetpath): - """Set modification time of targetpath according to tarinfo. 
- """ - if not hasattr(os, 'utime'): - return - try: - os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime)) - except EnvironmentError as e: - raise ExtractError("could not change modification time") - - #-------------------------------------------------------------------------- - def next(self): - """Return the next member of the archive as a TarInfo object, when - TarFile is opened for reading. Return None if there is no more - available. - """ - self._check("ra") - if self.firstmember is not None: - m = self.firstmember - self.firstmember = None - return m - - # Read the next block. - self.fileobj.seek(self.offset) - tarinfo = None - while True: - try: - tarinfo = self.tarinfo.fromtarfile(self) - except EOFHeaderError as e: - if self.ignore_zeros: - self._dbg(2, "0x%X: %s" % (self.offset, e)) - self.offset += BLOCKSIZE - continue - except InvalidHeaderError as e: - if self.ignore_zeros: - self._dbg(2, "0x%X: %s" % (self.offset, e)) - self.offset += BLOCKSIZE - continue - elif self.offset == 0: - raise ReadError(str(e)) - except EmptyHeaderError: - if self.offset == 0: - raise ReadError("empty file") - except TruncatedHeaderError as e: - if self.offset == 0: - raise ReadError(str(e)) - except SubsequentHeaderError as e: - raise ReadError(str(e)) - break - - if tarinfo is not None: - self.members.append(tarinfo) - else: - self._loaded = True - - return tarinfo - - #-------------------------------------------------------------------------- - # Little helper methods: - - def _getmember(self, name, tarinfo=None, normalize=False): - """Find an archive member by name from bottom to top. - If tarinfo is given, it is used as the starting point. - """ - # Ensure that all members have been loaded. - members = self.getmembers() - - # Limit the member search list up to tarinfo. - if tarinfo is not None: - members = members[:members.index(tarinfo)] - - if normalize: - name = os.path.normpath(name) - - for member in reversed(members): - if normalize: - member_name = os.path.normpath(member.name) - else: - member_name = member.name - - if name == member_name: - return member - - def _load(self): - """Read through the entire archive file and look for readable - members. - """ - while True: - tarinfo = self.next() - if tarinfo is None: - break - self._loaded = True - - def _check(self, mode=None): - """Check if TarFile is still open, and if the operation's mode - corresponds to TarFile's mode. - """ - if self.closed: - raise IOError("%s is closed" % self.__class__.__name__) - if mode is not None and self.mode not in mode: - raise IOError("bad operation for mode %r" % self.mode) - - def _find_link_target(self, tarinfo): - """Find the target member of a symlink or hardlink member in the - archive. - """ - if tarinfo.issym(): - # Always search the entire archive. - linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname - limit = None - else: - # Search the archive before the link, because a hard link is - # just a reference to an already archived file. - linkname = tarinfo.linkname - limit = tarinfo - - member = self._getmember(linkname, tarinfo=limit, normalize=True) - if member is None: - raise KeyError("linkname %r not found" % linkname) - return member - - def __iter__(self): - """Provide an iterator object. - """ - if self._loaded: - return iter(self.members) - else: - return TarIter(self) - - def _dbg(self, level, msg): - """Write debugging output to sys.stderr. 
- """ - if level <= self.debug: - print(msg, file=sys.stderr) - - def __enter__(self): - self._check() - return self - - def __exit__(self, type, value, traceback): - if type is None: - self.close() - else: - # An exception occurred. We must not call close() because - # it would try to write end-of-archive blocks and padding. - if not self._extfileobj: - self.fileobj.close() - self.closed = True -# class TarFile - -class TarIter(object): - """Iterator Class. - - for tarinfo in TarFile(...): - suite... - """ - - def __init__(self, tarfile): - """Construct a TarIter object. - """ - self.tarfile = tarfile - self.index = 0 - def __iter__(self): - """Return iterator object. - """ - return self - - def __next__(self): - """Return the next item using TarFile's next() method. - When all members have been read, set TarFile as _loaded. - """ - # Fix for SF #1100429: Under rare circumstances it can - # happen that getmembers() is called during iteration, - # which will cause TarIter to stop prematurely. - if not self.tarfile._loaded: - tarinfo = self.tarfile.next() - if not tarinfo: - self.tarfile._loaded = True - raise StopIteration - else: - try: - tarinfo = self.tarfile.members[self.index] - except IndexError: - raise StopIteration - self.index += 1 - return tarinfo - - next = __next__ # for Python 2.x - -#-------------------- -# exported functions -#-------------------- -def is_tarfile(name): - """Return True if name points to a tar archive that we - are able to handle, else return False. - """ - try: - t = open(name) - t.close() - return True - except TarError: - return False - -bltn_open = open -open = TarFile.open diff --git a/src/rez/vendor/distlib/compat.py b/src/rez/vendor/distlib/compat.py index ff328c8ee..ca561dd2e 100644 --- a/src/rez/vendor/distlib/compat.py +++ b/src/rez/vendor/distlib/compat.py @@ -8,6 +8,7 @@ import os import re +import shutil import sys try: @@ -22,7 +23,6 @@ from types import FileType as file_type import __builtin__ as builtins import ConfigParser as configparser - from ._backport import shutil from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit from urllib import (urlretrieve, quote as _quote, unquote, url2pathname, pathname2url, ContentTooShortError, splittype) @@ -34,9 +34,8 @@ def quote(s): import urllib2 from urllib2 import (Request, urlopen, URLError, HTTPError, - HTTPBasicAuthHandler, HTTPPasswordMgr, - HTTPHandler, HTTPRedirectHandler, - build_opener) + HTTPBasicAuthHandler, HTTPPasswordMgr, HTTPHandler, + HTTPRedirectHandler, build_opener) if ssl: from urllib2 import HTTPSHandler import httplib @@ -48,17 +47,18 @@ def quote(s): from itertools import ifilter as filter from itertools import ifilterfalse as filterfalse - _userprog = None - def splituser(host): - """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" - global _userprog - if _userprog is None: - import re - _userprog = re.compile('^(.*)@(.*)$') + # Leaving this around for now, in case it needs resurrecting in some way + # _userprog = None + # def splituser(host): + # """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'.""" + # global _userprog + # if _userprog is None: + # import re + # _userprog = re.compile('^(.*)@(.*)$') - match = _userprog.match(host) - if match: return match.group(1, 2) - return None, host + # match = _userprog.match(host) + # if match: return match.group(1, 2) + # return None, host else: # pragma: no cover from io import StringIO @@ -67,14 +67,12 @@ def splituser(host): from io import TextIOWrapper as 
file_type import builtins import configparser - import shutil - from urllib.parse import (urlparse, urlunparse, urljoin, splituser, quote, - unquote, urlsplit, urlunsplit, splittype) + from urllib.parse import (urlparse, urlunparse, urljoin, quote, unquote, + urlsplit, urlunsplit, splittype) from urllib.request import (urlopen, urlretrieve, Request, url2pathname, - pathname2url, - HTTPBasicAuthHandler, HTTPPasswordMgr, - HTTPHandler, HTTPRedirectHandler, - build_opener) + pathname2url, HTTPBasicAuthHandler, + HTTPPasswordMgr, HTTPHandler, + HTTPRedirectHandler, build_opener) if ssl: from urllib.request import HTTPSHandler from urllib.error import HTTPError, URLError, ContentTooShortError @@ -90,11 +88,11 @@ def splituser(host): try: from ssl import match_hostname, CertificateError -except ImportError: # pragma: no cover +except ImportError: # pragma: no cover + class CertificateError(ValueError): pass - def _dnsname_match(dn, hostname, max_wildcards=1): """Matching according to RFC 6125, section 6.4.3 @@ -144,7 +142,6 @@ def _dnsname_match(dn, hostname, max_wildcards=1): pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE) return pat.match(hostname) - def match_hostname(cert, hostname): """Verify that *cert* (in decoded format as returned by SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125 @@ -177,24 +174,26 @@ def match_hostname(cert, hostname): dnsnames.append(value) if len(dnsnames) > 1: raise CertificateError("hostname %r " - "doesn't match either of %s" - % (hostname, ', '.join(map(repr, dnsnames)))) + "doesn't match either of %s" % + (hostname, ', '.join(map(repr, dnsnames)))) elif len(dnsnames) == 1: raise CertificateError("hostname %r " - "doesn't match %r" - % (hostname, dnsnames[0])) + "doesn't match %r" % + (hostname, dnsnames[0])) else: raise CertificateError("no appropriate commonName or " - "subjectAltName fields were found") + "subjectAltName fields were found") try: from types import SimpleNamespace as Container except ImportError: # pragma: no cover + class Container(object): """ A generic container for when multiple values need to be returned """ + def __init__(self, **kwargs): self.__dict__.update(kwargs) @@ -213,12 +212,12 @@ def which(cmd, mode=os.F_OK | os.X_OK, path=None): path. """ + # Check that a given file can be accessed with the correct mode. # Additionally check that `file` is not a directory, as on Windows # directories pass the os.access check. def _access_check(fn, mode): - return (os.path.exists(fn) and os.access(fn, mode) - and not os.path.isdir(fn)) + return (os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)) # If we're given a path with a directory part, look it up directly rather # than referring to PATH directories. This includes checking relative to the @@ -236,7 +235,7 @@ def _access_check(fn, mode): if sys.platform == "win32": # The current directory takes precedence on Windows. - if not os.curdir in path: + if os.curdir not in path: path.insert(0, os.curdir) # PATHEXT is necessary to check on Windows. 
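This backported `which()` is only a fallback for interpreters whose `shutil` lacks it (Python < 3.3); with the `_backport.shutil` import gone, the unconditional `import shutil` added at the top of `compat.py` serves everything else. A small usage sketch (the command name is arbitrary):

```python
import shutil

# shutil.which mirrors the fallback above: it walks PATH (and PATHEXT on
# Windows) and returns the first executable match, or None.
exe = shutil.which("git")
if exe is None:
    raise RuntimeError("git is not on PATH")
print(exe)
```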
@@ -257,7 +256,7 @@ def _access_check(fn, mode): seen = set() for dir in path: normdir = os.path.normcase(dir) - if not normdir in seen: + if normdir not in seen: seen.add(normdir) for thefile in files: name = os.path.join(dir, thefile) @@ -276,6 +275,7 @@ def _access_check(fn, mode): from zipfile import ZipExtFile as BaseZipExtFile class ZipExtFile(BaseZipExtFile): + def __init__(self, base): self.__dict__.update(base.__dict__) @@ -287,6 +287,7 @@ def __exit__(self, *exc_info): # return None, so if an exception occurred, it will propagate class ZipFile(BaseZipFile): + def __enter__(self): return self @@ -298,9 +299,11 @@ def open(self, *args, **kwargs): base = BaseZipFile.open(self, *args, **kwargs) return ZipExtFile(base) + try: from platform import python_implementation -except ImportError: # pragma: no cover +except ImportError: # pragma: no cover + def python_implementation(): """Return a string identifying the Python implementation.""" if 'PyPy' in sys.version: @@ -311,15 +314,13 @@ def python_implementation(): return 'IronPython' return 'CPython' -try: - import sysconfig -except ImportError: # pragma: no cover - from ._backport import sysconfig + +import sysconfig try: callable = callable -except NameError: # pragma: no cover - from collections import Callable +except NameError: # pragma: no cover + from collections.abc import Callable def callable(obj): return isinstance(obj, Callable) @@ -359,11 +360,11 @@ def fsdecode(filename): raise TypeError("expect bytes or str, not %s" % type(filename).__name__) + try: from tokenize import detect_encoding -except ImportError: # pragma: no cover +except ImportError: # pragma: no cover from codecs import BOM_UTF8, lookup - import re cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)") @@ -402,6 +403,7 @@ def detect_encoding(readline): bom_found = False encoding = None default = 'utf-8' + def read_or_stop(): try: return readline() @@ -431,8 +433,8 @@ def find_cookie(line): if filename is None: msg = "unknown encoding: " + encoding else: - msg = "unknown encoding for {!r}: {}".format(filename, - encoding) + msg = "unknown encoding for {!r}: {}".format( + filename, encoding) raise SyntaxError(msg) if bom_found: @@ -441,7 +443,8 @@ def find_cookie(line): if filename is None: msg = 'encoding problem: utf-8' else: - msg = 'encoding problem for {!r}: utf-8'.format(filename) + msg = 'encoding problem for {!r}: utf-8'.format( + filename) raise SyntaxError(msg) encoding += '-sig' return encoding @@ -468,6 +471,7 @@ def find_cookie(line): return default, [first, second] + # For converting & <-> & etc. 
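The substantive compatibility changes in this hunk are the unconditional `import sysconfig` (the `_backport` copy is dropped) and the `callable` fallback importing `Callable` from `collections.abc`, since the aliases in `collections` itself were removed in Python 3.10. An illustrative sketch of the surviving fallback logic:

```python
from collections.abc import Callable

# `from collections import Callable` raises ImportError on Python 3.10+,
# which is why the fallback now targets collections.abc.
def is_callable(obj):
    # Matches functions, lambdas, methods and anything defining __call__.
    return isinstance(obj, Callable)

assert is_callable(len) and not is_callable(42)
```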
try: from html import escape @@ -480,12 +484,13 @@ def find_cookie(line): try: from collections import ChainMap -except ImportError: # pragma: no cover +except ImportError: # pragma: no cover from collections import MutableMapping try: from reprlib import recursive_repr as _recursive_repr except ImportError: + def _recursive_repr(fillvalue='...'): ''' Decorator to make a repr function return fillvalue for a recursive @@ -510,13 +515,15 @@ def wrapper(self): wrapper.__module__ = getattr(user_function, '__module__') wrapper.__doc__ = getattr(user_function, '__doc__') wrapper.__name__ = getattr(user_function, '__name__') - wrapper.__annotations__ = getattr(user_function, '__annotations__', {}) + wrapper.__annotations__ = getattr(user_function, + '__annotations__', {}) return wrapper return decorating_function class ChainMap(MutableMapping): - ''' A ChainMap groups multiple dicts (or other mappings) together + ''' + A ChainMap groups multiple dicts (or other mappings) together to create a single, updateable view. The underlying mappings are stored in a list. That list is public and can @@ -525,7 +532,6 @@ class ChainMap(MutableMapping): Lookups search the underlying mappings successively until a key is found. In contrast, writes, updates, and deletions only operate on the first mapping. - ''' def __init__(self, *maps): @@ -533,7 +539,7 @@ def __init__(self, *maps): If no mappings are provided, a single empty dictionary is used. ''' - self.maps = list(maps) or [{}] # always at least one map + self.maps = list(maps) or [{}] # always at least one map def __missing__(self, key): raise KeyError(key) @@ -541,16 +547,19 @@ def __missing__(self, key): def __getitem__(self, key): for mapping in self.maps: try: - return mapping[key] # can't use 'key in mapping' with defaultdict + return mapping[ + key] # can't use 'key in mapping' with defaultdict except KeyError: pass - return self.__missing__(key) # support subclasses that define __missing__ + return self.__missing__( + key) # support subclasses that define __missing__ def get(self, key, default=None): return self[key] if key in self else default def __len__(self): - return len(set().union(*self.maps)) # reuses stored hash values if possible + return len(set().union( + *self.maps)) # reuses stored hash values if possible def __iter__(self): return iter(set().union(*self.maps)) @@ -577,12 +586,12 @@ def copy(self): __copy__ = copy - def new_child(self): # like Django's Context.push() + def new_child(self): # like Django's Context.push() 'New ChainMap with a new dict followed by all previous maps.' return self.__class__({}, *self.maps) @property - def parents(self): # like Django's Context.pop() + def parents(self): # like Django's Context.pop() 'New ChainMap from maps[1:].' return self.__class__(*self.maps[1:]) @@ -593,7 +602,8 @@ def __delitem__(self, key): try: del self.maps[0][key] except KeyError: - raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + raise KeyError( + 'Key not found in the first mapping: {!r}'.format(key)) def popitem(self): 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' @@ -607,34 +617,35 @@ def pop(self, key, *args): try: return self.maps[0].pop(key, *args) except KeyError: - raise KeyError('Key not found in the first mapping: {!r}'.format(key)) + raise KeyError( + 'Key not found in the first mapping: {!r}'.format(key)) def clear(self): 'Clear maps[0], leaving maps[1:] intact.' 
self.maps[0].clear() + try: from importlib.util import cache_from_source # Python >= 3.4 except ImportError: # pragma: no cover - try: - from imp import cache_from_source - except ImportError: # pragma: no cover - def cache_from_source(path, debug_override=None): - assert path.endswith('.py') - if debug_override is None: - debug_override = __debug__ - if debug_override: - suffix = 'c' - else: - suffix = 'o' - return path + suffix + + def cache_from_source(path, debug_override=None): + assert path.endswith('.py') + if debug_override is None: + debug_override = __debug__ + if debug_override: + suffix = 'c' + else: + suffix = 'o' + return path + suffix + try: from collections import OrderedDict -except ImportError: # pragma: no cover -## {{{ http://code.activestate.com/recipes/576693/ (r9) -# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. -# Passes Python2.7's test suite and incorporates all the latest updates. +except ImportError: # pragma: no cover + # {{{ http://code.activestate.com/recipes/576693/ (r9) + # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy. + # Passes Python2.7's test suite and incorporates all the latest updates. try: from thread import get_ident as _get_ident except ImportError: @@ -645,9 +656,9 @@ def cache_from_source(path, debug_override=None): except ImportError: pass - class OrderedDict(dict): 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. # The inherited dict provides __getitem__, __len__, __contains__, and get. # The remaining methods are order-aware. @@ -665,11 +676,12 @@ def __init__(self, *args, **kwds): ''' if len(args) > 1: - raise TypeError('expected at most 1 arguments, got %d' % len(args)) + raise TypeError('expected at most 1 arguments, got %d' % + len(args)) try: self.__root except AttributeError: - self.__root = root = [] # sentinel node + self.__root = root = [] # sentinel node root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds) @@ -783,7 +795,7 @@ def update(*args, **kwds): ''' if len(args) > 2: raise TypeError('update() takes at most 2 positional ' - 'arguments (%d given)' % (len(args),)) + 'arguments (%d given)' % (len(args), )) elif not args: raise TypeError('update() takes at least 1 argument (0 given)') self = args[0] @@ -829,14 +841,15 @@ def setdefault(self, key, default=None): def __repr__(self, _repr_running=None): 'od.__repr__() <==> repr(od)' - if not _repr_running: _repr_running = {} + if not _repr_running: + _repr_running = {} call_key = id(self), _get_ident() if call_key in _repr_running: return '...' 
_repr_running[call_key] = 1 try: if not self: - return '%s()' % (self.__class__.__name__,) + return '%s()' % (self.__class__.__name__, ) return '%s(%r)' % (self.__class__.__name__, self.items()) finally: del _repr_running[call_key] @@ -848,8 +861,8 @@ def __reduce__(self): for k in vars(OrderedDict()): inst_dict.pop(k, None) if inst_dict: - return (self.__class__, (items,), inst_dict) - return self.__class__, (items,) + return (self.__class__, (items, ), inst_dict) + return self.__class__, (items, ) def copy(self): 'od.copy() -> a shallow copy of od' @@ -872,7 +885,8 @@ def __eq__(self, other): ''' if isinstance(other, OrderedDict): - return len(self)==len(other) and self.items() == other.items() + return len(self) == len( + other) and self.items() == other.items() return dict.__eq__(self, other) def __ne__(self, other): @@ -892,19 +906,18 @@ def viewitems(self): "od.viewitems() -> a set-like object providing a view on od's items" return ItemsView(self) + try: from logging.config import BaseConfigurator, valid_ident -except ImportError: # pragma: no cover +except ImportError: # pragma: no cover IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) - def valid_ident(s): m = IDENTIFIER.match(s) if not m: raise ValueError('Not a valid Python identifier: %r' % s) return True - # The ConvertingXXX classes are wrappers around standard Python containers, # and they serve to convert any suitable values in the container. The # conversion converts base dicts, lists and tuples to their wrapped @@ -920,7 +933,7 @@ class ConvertingDict(dict): def __getitem__(self, key): value = dict.__getitem__(self, key) result = self.configurator.convert(value) - #If the converted value is different, save for next time + # If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, @@ -932,7 +945,7 @@ def __getitem__(self, key): def get(self, key, default=None): value = dict.get(self, key, default) result = self.configurator.convert(value) - #If the converted value is different, save for next time + # If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, @@ -953,10 +966,11 @@ def pop(self, key, default=None): class ConvertingList(list): """A converting list wrapper.""" + def __getitem__(self, key): value = list.__getitem__(self, key) result = self.configurator.convert(value) - #If the converted value is different, save for next time + # If the converted value is different, save for next time if value is not result: self[key] = result if type(result) in (ConvertingDict, ConvertingList, @@ -976,6 +990,7 @@ def pop(self, idx=-1): class ConvertingTuple(tuple): """A converting tuple wrapper.""" + def __getitem__(self, key): value = tuple.__getitem__(self, key) result = self.configurator.convert(value) @@ -999,8 +1014,8 @@ class BaseConfigurator(object): DIGIT_PATTERN = re.compile(r'^\d+$') value_converters = { - 'ext' : 'ext_convert', - 'cfg' : 'cfg_convert', + 'ext': 'ext_convert', + 'cfg': 'cfg_convert', } # We might want to use a different one, e.g. 
importlib @@ -1046,7 +1061,6 @@ def cfg_convert(self, value): else: rest = rest[m.end():] d = self.config[m.groups()[0]] - #print d, rest while rest: m = self.DOT_PATTERN.match(rest) if m: @@ -1059,7 +1073,9 @@ def cfg_convert(self, value): d = d[idx] else: try: - n = int(idx) # try as number first (most likely) + n = int( + idx + ) # try as number first (most likely) d = d[n] except TypeError: d = d[idx] @@ -1068,7 +1084,7 @@ def cfg_convert(self, value): else: raise ValueError('Unable to convert ' '%r at %r' % (value, rest)) - #rest should be empty + # rest should be empty return d def convert(self, value): @@ -1077,14 +1093,15 @@ def convert(self, value): replaced by their converting alternatives. Strings are checked to see if they have a conversion format and are converted if they do. """ - if not isinstance(value, ConvertingDict) and isinstance(value, dict): + if not isinstance(value, ConvertingDict) and isinstance( + value, dict): value = ConvertingDict(value) value.configurator = self - elif not isinstance(value, ConvertingList) and isinstance(value, list): + elif not isinstance(value, ConvertingList) and isinstance( + value, list): value = ConvertingList(value) value.configurator = self - elif not isinstance(value, ConvertingTuple) and\ - isinstance(value, tuple): + elif not isinstance(value, ConvertingTuple) and isinstance(value, tuple): value = ConvertingTuple(value) value.configurator = self elif isinstance(value, string_types): diff --git a/src/rez/vendor/distlib/database.py b/src/rez/vendor/distlib/database.py index b13cdac92..c0f896a7d 100644 --- a/src/rez/vendor/distlib/database.py +++ b/src/rez/vendor/distlib/database.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2012-2017 The Python Software Foundation. +# Copyright (C) 2012-2023 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """PEP 376 implementation.""" @@ -20,24 +20,20 @@ from . import DistlibException, resources from .compat import StringIO from .version import get_scheme, UnsupportedVersionError -from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME, - LEGACY_METADATA_FILENAME) -from .util import (parse_requirement, cached_property, parse_name_and_version, - read_exports, write_exports, CSVReader, CSVWriter) - - -__all__ = ['Distribution', 'BaseInstalledDistribution', - 'InstalledDistribution', 'EggInfoDistribution', - 'DistributionPath'] +from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME) +from .util import (parse_requirement, cached_property, parse_name_and_version, read_exports, write_exports, CSVReader, + CSVWriter) +__all__ = [ + 'Distribution', 'BaseInstalledDistribution', 'InstalledDistribution', 'EggInfoDistribution', 'DistributionPath' +] logger = logging.getLogger(__name__) EXPORTS_FILENAME = 'pydist-exports.json' COMMANDS_FILENAME = 'pydist-commands.json' -DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED', - 'RESOURCES', EXPORTS_FILENAME, 'SHARED') +DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED', 'RESOURCES', EXPORTS_FILENAME, 'SHARED') DISTINFO_EXT = '.dist-info' @@ -46,6 +42,7 @@ class _Cache(object): """ A simple cache mapping names and .dist-info paths to distributions """ + def __init__(self): """ Initialise an instance. There is normally one for each DistributionPath. @@ -76,6 +73,7 @@ class DistributionPath(object): """ Represents a set of distributions installed on a path (typically sys.path). 
""" + def __init__(self, path=None, include_egg=False): """ Create an instance from a path, optionally including legacy (distutils/ @@ -111,7 +109,6 @@ def clear_cache(self): self._cache.clear() self._cache_egg.clear() - def _yield_distributions(self): """ Yield .dist-info and/or .egg(-info) distributions. @@ -132,29 +129,31 @@ def _yield_distributions(self): r = finder.find(entry) if not r or r.path in seen: continue - if self._include_dist and entry.endswith(DISTINFO_EXT): - possible_filenames = [METADATA_FILENAME, - WHEEL_METADATA_FILENAME, - LEGACY_METADATA_FILENAME] - for metadata_filename in possible_filenames: - metadata_path = posixpath.join(entry, metadata_filename) - pydist = finder.find(metadata_path) - if pydist: - break - else: - continue + try: + if self._include_dist and entry.endswith(DISTINFO_EXT): + possible_filenames = [METADATA_FILENAME, WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME] + for metadata_filename in possible_filenames: + metadata_path = posixpath.join(entry, metadata_filename) + pydist = finder.find(metadata_path) + if pydist: + break + else: + continue - with contextlib.closing(pydist.as_stream()) as stream: - metadata = Metadata(fileobj=stream, scheme='legacy') - logger.debug('Found %s', r.path) - seen.add(r.path) - yield new_dist_class(r.path, metadata=metadata, - env=self) - elif self._include_egg and entry.endswith(('.egg-info', - '.egg')): - logger.debug('Found %s', r.path) - seen.add(r.path) - yield old_dist_class(r.path, self) + with contextlib.closing(pydist.as_stream()) as stream: + metadata = Metadata(fileobj=stream, scheme='legacy') + logger.debug('Found %s', r.path) + seen.add(r.path) + yield new_dist_class(r.path, metadata=metadata, env=self) + elif self._include_egg and entry.endswith(('.egg-info', '.egg')): + logger.debug('Found %s', r.path) + seen.add(r.path) + yield old_dist_class(r.path, self) + except Exception as e: + msg = 'Unable to read distribution at %s, perhaps due to bad metadata: %s' + logger.warning(msg, r.path, e) + import warnings + warnings.warn(msg % (r.path, e), stacklevel=2) def _generate_cache(self): """ @@ -264,8 +263,7 @@ def provides_distribution(self, name, version=None): try: matcher = self._scheme.matcher('%s (%s)' % (name, version)) except ValueError: - raise DistlibException('invalid name or version: %r, %r' % - (name, version)) + raise DistlibException('invalid name or version: %r, %r' % (name, version)) for dist in self.get_distributions(): # We hit a problem on Travis where enum34 was installed and doesn't @@ -340,12 +338,12 @@ def __init__(self, metadata): """ self.metadata = metadata self.name = metadata.name - self.key = self.name.lower() # for case-insensitive comparisons + self.key = self.name.lower() # for case-insensitive comparisons self.version = metadata.version self.locator = None self.digest = None - self.extras = None # additional features requested - self.context = None # environment marker overrides + self.extras = None # additional features requested + self.context = None # environment marker overrides self.download_urls = set() self.digests = {} @@ -356,7 +354,7 @@ def source_url(self): """ return self.metadata.source_url - download_url = source_url # Backward compatibility + download_url = source_url # Backward compatibility @property def name_and_version(self): @@ -379,10 +377,9 @@ def provides(self): def _get_requirements(self, req_attr): md = self.metadata - logger.debug('Getting requirements from metadata %r', md.todict()) reqts = getattr(md, req_attr) - return 
set(md.get_requirements(reqts, extras=self.extras, - env=self.context)) + logger.debug('%s: got requirements %r from metadata: %r', self.name, req_attr, reqts) + return set(md.get_requirements(reqts, extras=self.extras, env=self.context)) @property def run_requires(self): @@ -419,12 +416,11 @@ def matches_requirement(self, req): matcher = scheme.matcher(r.requirement) except UnsupportedVersionError: # XXX compat-mode if cannot read the version - logger.warning('could not read version %r - using name only', - req) + logger.warning('could not read version %r - using name only', req) name = req.split()[0] matcher = scheme.matcher(name) - name = matcher.key # case-insensitive + name = matcher.key # case-insensitive result = False for p in self.provides: @@ -459,9 +455,7 @@ def __eq__(self, other): if type(other) is not type(self): result = False else: - result = (self.name == other.name and - self.version == other.version and - self.source_url == other.source_url) + result = (self.name == other.name and self.version == other.version and self.source_url == other.source_url) return result def __hash__(self): @@ -550,10 +544,9 @@ def __init__(self, path, metadata=None, env=None): r = finder.find(WHEEL_METADATA_FILENAME) # Temporary - for legacy support if r is None: - r = finder.find('METADATA') + r = finder.find(LEGACY_METADATA_FILENAME) if r is None: - raise ValueError('no %s found in %s' % (METADATA_FILENAME, - path)) + raise ValueError('no %s found in %s' % (METADATA_FILENAME, path)) with contextlib.closing(r.as_stream()) as stream: metadata = Metadata(fileobj=stream, scheme='legacy') @@ -564,15 +557,14 @@ def __init__(self, path, metadata=None, env=None): r = finder.find('REQUESTED') self.requested = r is not None - p = os.path.join(path, 'top_level.txt') + p = os.path.join(path, 'top_level.txt') if os.path.exists(p): with open(p, 'rb') as f: - data = f.read() + data = f.read().decode('utf-8') self.modules = data.splitlines() def __repr__(self): - return '' % ( - self.name, self.version, self.path) + return '' % (self.name, self.version, self.path) def __str__(self): return "%s %s" % (self.name, self.version) @@ -589,14 +581,14 @@ def _get_records(self): with contextlib.closing(r.as_stream()) as stream: with CSVReader(stream=stream) as record_reader: # Base location is parent dir of .dist-info dir - #base_location = os.path.dirname(self.path) - #base_location = os.path.abspath(base_location) + # base_location = os.path.dirname(self.path) + # base_location = os.path.abspath(base_location) for row in record_reader: missing = [None for i in range(len(row), 3)] path, checksum, size = row + missing - #if not os.path.isabs(path): - # path = path.replace('/', os.sep) - # path = os.path.join(base_location, path) + # if not os.path.isabs(path): + # path = path.replace('/', os.sep) + # path = os.path.join(base_location, path) results.append((path, checksum, size)) return results @@ -694,8 +686,7 @@ def write_installed_files(self, paths, prefix, dry_run=False): size = '%d' % os.path.getsize(path) with open(path, 'rb') as fp: hash_value = self.get_hash(fp.read()) - if path.startswith(base) or (base_under_prefix and - path.startswith(prefix)): + if path.startswith(base) or (base_under_prefix and path.startswith(prefix)): path = os.path.relpath(path, base) writer.writerow((path, hash_value, size)) @@ -784,7 +775,7 @@ def write_shared_locations(self, paths, dry_run=False): for key in ('prefix', 'lib', 'headers', 'scripts', 'data'): path = paths[key] if os.path.isdir(paths[key]): - lines.append('%s=%s' % 
(key, path)) + lines.append('%s=%s' % (key, path)) for ns in paths.get('namespace', ()): lines.append('namespace=%s' % ns) @@ -819,9 +810,8 @@ def get_distinfo_file(self, path): # it's an absolute path? distinfo_dirname, path = path.split(os.sep)[-2:] if distinfo_dirname != self.path.split(os.sep)[-1]: - raise DistlibException( - 'dist-info file %r does not belong to the %r %s ' - 'distribution' % (path, self.name, self.version)) + raise DistlibException('dist-info file %r does not belong to the %r %s ' + 'distribution' % (path, self.name, self.version)) # The file must be relative if path not in DIST_FILES: @@ -847,8 +837,7 @@ def list_distinfo_files(self): yield path def __eq__(self, other): - return (isinstance(other, InstalledDistribution) and - self.path == other.path) + return (isinstance(other, InstalledDistribution) and self.path == other.path) # See http://docs.python.org/reference/datamodel#object.__hash__ __hash__ = object.__hash__ @@ -860,13 +849,14 @@ class EggInfoDistribution(BaseInstalledDistribution): if the given path happens to be a directory, the metadata is read from the file ``PKG-INFO`` under that directory.""" - requested = True # as we have no way of knowing, assume it was + requested = True # as we have no way of knowing, assume it was shared_locations = {} def __init__(self, path, env=None): + def set_name_and_version(s, n, v): s.name = n - s.key = n.lower() # for case-insensitive comparisons + s.key = n.lower() # for case-insensitive comparisons s.version = v self.path = path @@ -896,15 +886,17 @@ def parse_requires_data(data): lines = data.splitlines() for line in lines: line = line.strip() - if line.startswith('['): - logger.warning('Unexpected line: quitting requirement scan: %r', - line) + # sectioned files have bare newlines (separating sections) + if not line: # pragma: no cover + continue + if line.startswith('['): # pragma: no cover + logger.warning('Unexpected line: quitting requirement scan: %r', line) break r = parse_requirement(line) - if not r: + if not r: # pragma: no cover logger.warning('Not recognised as a requirement: %r', line) continue - if r.extras: + if r.extras: # pragma: no cover logger.warning('extra requirements in requires.txt are ' 'not supported') if not r.constraints: @@ -940,8 +932,7 @@ def parse_requires_path(req_path): else: # FIXME handle the case where zipfile is not available zipf = zipimport.zipimporter(path) - fileobj = StringIO( - zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8')) + fileobj = StringIO(zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8')) metadata = Metadata(fileobj=fileobj, scheme='legacy') try: data = zipf.get_data('EGG-INFO/requires.txt') @@ -975,8 +966,7 @@ def parse_requires_path(req_path): return metadata def __repr__(self): - return '' % ( - self.name, self.version, self.path) + return '' % (self.name, self.version, self.path) def __str__(self): return "%s %s" % (self.name, self.version) @@ -1032,7 +1022,7 @@ def _size(path): logger.warning('Non-existent file: %s', p) if p.endswith(('.pyc', '.pyo')): continue - #otherwise fall through and fail + # otherwise fall through and fail if not os.path.isdir(p): result.append((p, _md5(p), _size(p))) result.append((record_path, None, None)) @@ -1068,12 +1058,12 @@ def list_distinfo_files(self, absolute=False): yield line def __eq__(self, other): - return (isinstance(other, EggInfoDistribution) and - self.path == other.path) + return (isinstance(other, EggInfoDistribution) and self.path == other.path) # See http://docs.python.org/reference/datamodel#object.__hash__ 
__hash__ = object.__hash__ + new_dist_class = InstalledDistribution old_dist_class = EggInfoDistribution @@ -1107,7 +1097,7 @@ def add_distribution(self, distribution): """ self.adjacency_list[distribution] = [] self.reverse_list[distribution] = [] - #self.missing[distribution] = [] + # self.missing[distribution] = [] def add_edge(self, x, y, label=None): """Add an edge from distribution *x* to distribution *y* with the given @@ -1167,9 +1157,8 @@ def to_dot(self, f, skip_disconnected=True): if len(adjs) == 0 and not skip_disconnected: disconnected.append(dist) for other, label in adjs: - if not label is None: - f.write('"%s" -> "%s" [label="%s"]\n' % - (dist.name, other.name, label)) + if label is not None: + f.write('"%s" -> "%s" [label="%s"]\n' % (dist.name, other.name, label)) else: f.write('"%s" -> "%s"\n' % (dist.name, other.name)) if not skip_disconnected and len(disconnected) > 0: @@ -1209,8 +1198,7 @@ def topological_sort(self): # Remove from the adjacency list of others for k, v in alist.items(): alist[k] = [(d, r) for d, r in v if d not in to_remove] - logger.debug('Moving to result: %s', - ['%s (%s)' % (d.name, d.version) for d in to_remove]) + logger.debug('Moving to result: %s', ['%s (%s)' % (d.name, d.version) for d in to_remove]) result.extend(to_remove) return result, list(alist.keys()) @@ -1245,19 +1233,17 @@ def make_graph(dists, scheme='default'): # now make the edges for dist in dists: - requires = (dist.run_requires | dist.meta_requires | - dist.build_requires | dist.dev_requires) + requires = (dist.run_requires | dist.meta_requires | dist.build_requires | dist.dev_requires) for req in requires: try: matcher = scheme.matcher(req) except UnsupportedVersionError: # XXX compat-mode if cannot read the version - logger.warning('could not read version %r - using name only', - req) + logger.warning('could not read version %r - using name only', req) name = req.split()[0] matcher = scheme.matcher(name) - name = matcher.key # case-insensitive + name = matcher.key # case-insensitive matched = False if name in provided: @@ -1308,22 +1294,26 @@ def get_required_dists(dists, dist): :param dists: a list of distributions :param dist: a distribution, member of *dists* for which we are interested + in finding the dependencies. """ if dist not in dists: raise DistlibException('given distribution %r is not a member ' 'of the list' % dist.name) graph = make_graph(dists) - req = [] # required distributions + req = set() # required distributions todo = graph.adjacency_list[dist] # list of nodes we should inspect + seen = set(t[0] for t in todo) # already added to todo while todo: d = todo.pop()[0] - req.append(d) - for pred in graph.adjacency_list[d]: - if pred not in req: + req.add(d) + pred_list = graph.adjacency_list[d] + for pred in pred_list: + d = pred[0] + if d not in req and d not in seen: + seen.add(d) todo.append(pred) - return req diff --git a/src/rez/vendor/distlib/index.py b/src/rez/vendor/distlib/index.py index 7a87cdcf7..56cd28671 100644 --- a/src/rez/vendor/distlib/index.py +++ b/src/rez/vendor/distlib/index.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2013 Vinay Sajip. +# Copyright (C) 2013-2023 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # @@ -12,19 +12,20 @@ import tempfile try: from threading import Thread -except ImportError: +except ImportError: # pragma: no cover from dummy_threading import Thread from . 
import DistlibException from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr, urlparse, build_opener, string_types) -from .util import cached_property, zip_dir, ServerProxy +from .util import zip_dir, ServerProxy logger = logging.getLogger(__name__) DEFAULT_INDEX = 'https://pypi.org/pypi' DEFAULT_REALM = 'pypi' + class PackageIndex(object): """ This class represents a package index compatible with PyPI, the Python @@ -67,21 +68,17 @@ def _get_pypirc_command(self): Get the distutils command for interacting with PyPI configurations. :return: the command. """ - from distutils.core import Distribution - from distutils.config import PyPIRCCommand - d = Distribution() - return PyPIRCCommand(d) + from .util import _get_pypirc_command as cmd + return cmd() def read_configuration(self): """ - Read the PyPI access configuration as supported by distutils, getting - PyPI to do the actual work. This populates ``username``, ``password``, - ``realm`` and ``url`` attributes from the configuration. + Read the PyPI access configuration as supported by distutils. This populates + ``username``, ``password``, ``realm`` and ``url`` attributes from the + configuration. """ - # get distutils to do the work - c = self._get_pypirc_command() - c.repository = self.url - cfg = c._read_pypirc() + from .util import _load_pypirc + cfg = _load_pypirc(self) self.username = cfg.get('username') self.password = cfg.get('password') self.realm = cfg.get('realm', 'pypi') @@ -91,13 +88,10 @@ def save_configuration(self): """ Save the PyPI access configuration. You must have set ``username`` and ``password`` attributes before calling this method. - - Again, distutils is used to do the actual work. """ self.check_credentials() - # get distutils to do the work - c = self._get_pypirc_command() - c._store_pypirc(self.username, self.password) + from .util import _store_pypirc + _store_pypirc(self) def check_credentials(self): """ @@ -111,7 +105,7 @@ def check_credentials(self): pm.add_password(self.realm, netloc, self.username, self.password) self.password_handler = HTTPBasicAuthHandler(pm) - def register(self, metadata): + def register(self, metadata): # pragma: no cover """ Register a distribution on PyPI, using the provided metadata. @@ -126,7 +120,7 @@ def register(self, metadata): d = metadata.todict() d[':action'] = 'verify' request = self.encode_request(d.items(), []) - response = self.send_request(request) + self.send_request(request) d[':action'] = 'submit' request = self.encode_request(d.items(), []) return self.send_request(request) @@ -149,8 +143,7 @@ def _reader(self, name, stream, outbuf): logger.debug('%s: %s' % (name, s)) stream.close() - def get_sign_command(self, filename, signer, sign_password, - keystore=None): + def get_sign_command(self, filename, signer, sign_password, keystore=None): # pragma: no cover """ Return a suitable command for signing a file. @@ -213,7 +206,7 @@ def run_command(self, cmd, input_data=None): t2.join() return p.returncode, stdout, stderr - def sign_file(self, filename, signer, sign_password, keystore=None): + def sign_file(self, filename, signer, sign_password, keystore=None): # pragma: no cover """ Sign a file. @@ -293,7 +286,7 @@ def upload_file(self, metadata, filename, signer=None, sign_password=None, request = self.encode_request(d.items(), files) return self.send_request(request) - def upload_documentation(self, metadata, doc_dir): + def upload_documentation(self, metadata, doc_dir): # pragma: no cover """ Upload documentation to the index. 
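The `_get_pypirc_command` / `read_configuration` / `save_configuration` rewrite above removes this module's dependency on `distutils`, which PEP 632 deprecated and Python 3.12 drops from the standard library. The replacement helpers (`_get_pypirc_command`, `_load_pypirc`, `_store_pypirc`) live in `distlib.util` and are not shown in this patch; the sketch below is only a rough, assumed shape of a distutils-free `.pypirc` reader, with all names hypothetical:

```python
import os
from configparser import RawConfigParser

def load_pypirc(repository="pypi"):
    # Hypothetical stand-in for distlib.util._load_pypirc: read the
    # classic ~/.pypirc INI file without importing distutils.
    cfg = {}
    rc = os.path.join(os.path.expanduser("~"), ".pypirc")
    if os.path.exists(rc):
        parser = RawConfigParser()
        parser.read(rc)
        if parser.has_section(repository):
            for key in ("username", "password", "realm", "repository"):
                if parser.has_option(repository, key):
                    cfg[key] = parser.get(repository, key)
    return cfg
```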
@@ -366,8 +359,7 @@ def verify_signature(self, signature_filename, data_filename, keystore) rc, stdout, stderr = self.run_command(cmd) if rc not in (0, 1): - raise DistlibException('verify command failed with error ' - 'code %s' % rc) + raise DistlibException('verify command failed with error code %s' % rc) return rc == 0 def download_file(self, url, destfile, digest=None, reporthook=None): @@ -506,7 +498,7 @@ def encode_request(self, fields, files): } return Request(self.url, body, headers) - def search(self, terms, operator=None): + def search(self, terms, operator=None): # pragma: no cover if isinstance(terms, string_types): terms = {'name': terms} rpc_proxy = ServerProxy(self.url, timeout=3.0) diff --git a/src/rez/vendor/distlib/locators.py b/src/rez/vendor/distlib/locators.py index a7ed9469d..222c1bf3e 100644 --- a/src/rez/vendor/distlib/locators.py +++ b/src/rez/vendor/distlib/locators.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2012-2015 Vinay Sajip. +# Copyright (C) 2012-2023 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # @@ -19,14 +19,11 @@ import zlib from . import DistlibException -from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, - queue, quote, unescape, string_types, build_opener, - HTTPRedirectHandler as BaseRedirectHandler, text_type, - Request, HTTPError, URLError) +from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url, queue, quote, unescape, build_opener, + HTTPRedirectHandler as BaseRedirectHandler, text_type, Request, HTTPError, URLError) from .database import Distribution, DistributionPath, make_dist from .metadata import Metadata, MetadataInvalidError -from .util import (cached_property, parse_credentials, ensure_slash, - split_filename, get_project_data, parse_requirement, +from .util import (cached_property, ensure_slash, split_filename, get_project_data, parse_requirement, parse_name_and_version, ServerProxy, normalize_name) from .version import get_scheme, UnsupportedVersionError from .wheel import Wheel, is_compatible @@ -38,6 +35,7 @@ HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml') DEFAULT_INDEX = 'https://pypi.org/pypi' + def get_all_distribution_names(url=None): """ Return all distribution names known by an index. @@ -52,10 +50,12 @@ def get_all_distribution_names(url=None): finally: client('close')() + class RedirectHandler(BaseRedirectHandler): """ A class to work around a bug in some Python 3.2.x releases. """ + # There's a bug in the base version for some 3.2.x # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header # returns e.g. /abc, it bails because it says the scheme '' @@ -78,18 +78,18 @@ def http_error_302(self, req, fp, code, msg, headers): headers.replace_header(key, newurl) else: headers[key] = newurl - return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, - headers) + return BaseRedirectHandler.http_error_302(self, req, fp, code, msg, headers) http_error_301 = http_error_303 = http_error_307 = http_error_302 + class Locator(object): """ A base class for locators - things that locate distributions. """ source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz') binary_extensions = ('.egg', '.exe', '.whl') - excluded_extensions = ('.pdf',) + excluded_extensions = ('.pdf', ) # A list of tags indicating which wheels you want to match. 
The default # value of None matches against the tags compatible with the running @@ -97,7 +97,7 @@ class Locator(object): # instance to a list of tuples (pyver, abi, arch) which you want to match. wheel_tags = None - downloadable_extensions = source_extensions + ('.whl',) + downloadable_extensions = source_extensions + ('.whl', ) def __init__(self, scheme='default'): """ @@ -197,8 +197,7 @@ def score_url(self, url): is_downloadable = basename.endswith(self.downloadable_extensions) if is_wheel: compatible = is_compatible(Wheel(basename), self.wheel_tags) - return (t.scheme == 'https', 'pypi.org' in t.netloc, - is_downloadable, is_wheel, compatible, basename) + return (t.scheme == 'https', 'pypi.org' in t.netloc, is_downloadable, is_wheel, compatible, basename) def prefer_url(self, url1, url2): """ @@ -236,14 +235,14 @@ def convert_url_to_download_info(self, url, project_name): If it is, a dictionary is returned with keys "name", "version", "filename" and "url"; otherwise, None is returned. """ + def same_project(name1, name2): return normalize_name(name1) == normalize_name(name2) result = None scheme, netloc, path, params, query, frag = urlparse(url) if frag.lower().startswith('egg='): # pragma: no cover - logger.debug('%s: version hint in fragment: %r', - project_name, frag) + logger.debug('%s: version hint in fragment: %r', project_name, frag) m = HASHER_HASH.match(frag) if m: algo, digest = m.groups() @@ -267,12 +266,10 @@ def same_project(name1, name2): 'name': wheel.name, 'version': wheel.version, 'filename': wheel.filename, - 'url': urlunparse((scheme, netloc, origpath, - params, query, '')), - 'python-version': ', '.join( - ['.'.join(list(v[2:])) for v in wheel.pyver]), + 'url': urlunparse((scheme, netloc, origpath, params, query, '')), + 'python-version': ', '.join(['.'.join(list(v[2:])) for v in wheel.pyver]), } - except Exception as e: # pragma: no cover + except Exception: # pragma: no cover logger.warning('invalid path for wheel: %s', path) elif not path.endswith(self.downloadable_extensions): # pragma: no cover logger.debug('Not downloadable: %s', path) @@ -291,9 +288,7 @@ def same_project(name1, name2): 'name': name, 'version': version, 'filename': filename, - 'url': urlunparse((scheme, netloc, origpath, - params, query, '')), - #'packagetype': 'sdist', + 'url': urlunparse((scheme, netloc, origpath, params, query, '')), } if pyver: # pragma: no cover result['python-version'] = pyver @@ -304,18 +299,25 @@ def same_project(name1, name2): def _get_digest(self, info): """ - Get a digest from a dictionary by looking at keys of the form - 'algo_digest'. + Get a digest from a dictionary by looking at a "digests" dictionary + or keys of the form 'algo_digest'. Returns a 2-tuple (algo, digest) if found, else None. Currently looks only for SHA256, then MD5. 
""" result = None - for algo in ('sha256', 'md5'): - key = '%s_digest' % algo - if key in info: - result = (algo, info[key]) - break + if 'digests' in info: + digests = info['digests'] + for algo in ('sha256', 'md5'): + if algo in digests: + result = (algo, digests[algo]) + break + if not result: + for algo in ('sha256', 'md5'): + key = '%s_digest' % algo + if key in info: + result = (algo, info[key]) + break return result def _update_version_data(self, result, info): @@ -362,7 +364,7 @@ def locate(self, requirement, prereleases=False): self.matcher = matcher = scheme.matcher(r.requirement) logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__) versions = self.get_project(r.name) - if len(versions) > 2: # urls and digests keys are present + if len(versions) > 2: # urls and digests keys are present # sometimes, versions are invalid slist = [] vcls = matcher.version_class @@ -371,16 +373,13 @@ def locate(self, requirement, prereleases=False): continue try: if not matcher.match(k): - logger.debug('%s did not match %r', matcher, k) + pass # logger.debug('%s did not match %r', matcher, k) else: if prereleases or not vcls(k).is_prerelease: slist.append(k) - else: - logger.debug('skipping pre-release ' - 'version %s of %s', k, matcher.name) except Exception: # pragma: no cover logger.warning('error matching %s with %r', matcher, k) - pass # slist.append(k) + pass # slist.append(k) if len(slist) > 1: slist = sorted(slist, key=scheme.key) if slist: @@ -406,6 +405,7 @@ class PyPIRPCLocator(Locator): This locator uses XML-RPC to locate distributions. It therefore cannot be used with simple mirrors (that only mirror file content). """ + def __init__(self, url, **kwargs): """ Initialise an instance. @@ -449,11 +449,13 @@ def _get_project(self, name): result['digests'][url] = digest return result + class PyPIJSONLocator(Locator): """ This locator uses PyPI's JSON interface. It's very limited in functionality and probably not worth using. """ + def __init__(self, url, **kwargs): super(PyPIJSONLocator, self).__init__(**kwargs) self.base_url = ensure_slash(url) @@ -469,7 +471,7 @@ def _get_project(self, name): url = urljoin(self.base_url, '%s/json' % quote(name)) try: resp = self.opener.open(url) - data = resp.read().decode() # for now + data = resp.read().decode() # for now d = json.loads(data) md = Metadata(scheme=self.scheme) data = d['info'] @@ -480,7 +482,7 @@ def _get_project(self, name): md.summary = data.get('summary') dist = Distribution(md) dist.locator = self - urls = d['urls'] + # urls = d['urls'] result[md.version] = dist for info in d['urls']: url = info['url'] @@ -491,7 +493,7 @@ def _get_project(self, name): # Now get other releases for version, infos in d['releases'].items(): if version == md.version: - continue # already done + continue # already done omd = Metadata(scheme=self.scheme) omd.name = md.name omd.version = version @@ -504,6 +506,8 @@ def _get_project(self, name): odist.digests[url] = self._get_digest(info) result['urls'].setdefault(version, set()).add(url) result['digests'][url] = self._get_digest(info) + + # for info in urls: # md.source_url = info['url'] # dist.digest = self._get_digest(info) @@ -527,7 +531,8 @@ class Page(object): # or immediately followed by a "rel" attribute. The attribute values can be # declared with double quotes, single quotes or no quotes - which leads to # the length of the expression. - _href = re.compile(""" + _href = re.compile( + """ (rel\\s*=\\s*(?:"(?P[^"]*)"|'(?P[^']*)'|(?P[^>\\s\n]*))\\s+)? 
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*)) (\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))? @@ -554,17 +559,16 @@ def links(self): about their "rel" attribute, for determining which ones to treat as downloads and which ones to queue for further scraping. """ + def clean(url): "Tidy up an URL." scheme, netloc, path, params, query, frag = urlparse(url) - return urlunparse((scheme, netloc, quote(path), - params, query, frag)) + return urlunparse((scheme, netloc, quote(path), params, query, frag)) result = set() for match in self._href.finditer(self.data): d = match.groupdict('') - rel = (d['rel1'] or d['rel2'] or d['rel3'] or - d['rel4'] or d['rel5'] or d['rel6']) + rel = (d['rel1'] or d['rel2'] or d['rel3'] or d['rel4'] or d['rel5'] or d['rel6']) url = d['url1'] or d['url2'] or d['url3'] url = urljoin(self.base_url, url) url = unescape(url) @@ -586,7 +590,7 @@ class SimpleScrapingLocator(Locator): # These are used to deal with various Content-Encoding schemes. decoders = { 'deflate': zlib.decompress, - 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(d)).read(), + 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(), 'none': lambda b: b, } @@ -626,7 +630,7 @@ def _prepare_threads(self): self._threads = [] for i in range(self.num_workers): t = threading.Thread(target=self._fetch) - t.setDaemon(True) + t.daemon = True t.start() self._threads.append(t) @@ -638,7 +642,7 @@ def _wait_threads(self): # Note that you need two loops, since you can't say which # thread will get each sentinel for t in self._threads: - self._to_fetch.put(None) # sentinel + self._to_fetch.put(None) # sentinel for t in self._threads: t.join() self._threads = [] @@ -686,7 +690,7 @@ def _process_download(self, url): info = self.convert_url_to_download_info(url, self.project_name) logger.debug('process_download: %s -> %s', url, info) if info: - with self._lock: # needed because self.result is shared + with self._lock: # needed because self.result is shared self._update_version_data(self.result, info) return info @@ -696,8 +700,7 @@ def _should_queue(self, link, referrer, rel): particular "rel" attribute should be queued for scraping. """ scheme, netloc, path, _, _, _ = urlparse(link) - if path.endswith(self.source_extensions + self.binary_extensions + - self.excluded_extensions): + if path.endswith(self.source_extensions + self.binary_extensions + self.excluded_extensions): result = False elif self.skip_externals and not link.startswith(self.base_url): result = False @@ -715,8 +718,7 @@ def _should_queue(self, link, referrer, rel): result = False else: result = True - logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, - referrer, result) + logger.debug('should_queue: %s (%s) from %s -> %s', link, rel, referrer, result) return result def _fetch(self): @@ -731,14 +733,13 @@ def _fetch(self): try: if url: page = self.get_page(url) - if page is None: # e.g. after an error + if page is None: # e.g. after an error continue for link, rel in page.links: if link not in self._seen: try: self._seen.add(link) - if (not self._process_download(link) and - self._should_queue(link, url, rel)): + if (not self._process_download(link) and self._should_queue(link, url, rel)): logger.debug('Queueing %s from %s', link, url) self._to_fetch.put(link) except MetadataInvalidError: # e.g.
invalid versions @@ -749,7 +750,7 @@ def _fetch(self): # always do this, to avoid hangs :-) self._to_fetch.task_done() if not url: - #logger.debug('Sentinel seen, quitting.') + # logger.debug('Sentinel seen, quitting.') break def get_page(self, url): @@ -786,7 +787,7 @@ def get_page(self, url): data = resp.read() encoding = headers.get('Content-Encoding') if encoding: - decoder = self.decoders[encoding] # fail if not found + decoder = self.decoders[encoding] # fail if not found data = decoder(data) encoding = 'utf-8' m = CHARSET.search(content_type) @@ -795,7 +796,7 @@ def get_page(self, url): try: data = data.decode(encoding) except UnicodeError: # pragma: no cover - data = data.decode('latin-1') # fallback + data = data.decode('latin-1') # fallback result = Page(data, final_url) self._page_cache[final_url] = result except HTTPError as e: @@ -808,7 +809,7 @@ def get_page(self, url): except Exception as e: # pragma: no cover logger.exception('Fetch failed: %s: %s', url, e) finally: - self._page_cache[url] = result # even if None (failure) + self._page_cache[url] = result # even if None (failure) return result _distname_re = re.compile('<a href=[^>]*>([^<]+)<') @@ -825,6 +826,7 @@ def get_distribution_names(self): result.add(match.group(1)) return result + class DirectoryLocator(Locator): """ This class locates distributions in a directory tree. @@ -861,9 +863,7 @@ def _get_project(self, name): for fn in files: if self.should_include(fn, root): fn = os.path.join(root, fn) - url = urlunparse(('file', '', - pathname2url(os.path.abspath(fn)), - '', '', '')) + url = urlunparse(('file', '', pathname2url(os.path.abspath(fn)), '', '', '')) info = self.convert_url_to_download_info(url, name) if info: self._update_version_data(result, info) @@ -880,9 +880,7 @@ def get_distribution_names(self): for fn in files: if self.should_include(fn, root): fn = os.path.join(root, fn) - url = urlunparse(('file', '', - pathname2url(os.path.abspath(fn)), - '', '', '')) + url = urlunparse(('file', '', pathname2url(os.path.abspath(fn)), '', '', '')) info = self.convert_url_to_download_info(url, None) if info: result.add(info['name']) @@ -890,6 +888,7 @@ def get_distribution_names(self): break return result + class JSONLocator(Locator): """ This locator uses special extended metadata (not available on PyPI) and is @@ -897,6 +896,7 @@ class JSONLocator(Locator): require archive downloads before dependencies can be determined! As you might imagine, that can be slow. """ + def get_distribution_names(self): """ Return all the distribution names known to this locator. @@ -913,9 +913,9 @@ def _get_project(self, name): # We don't store summary in project metadata as it makes # the data bigger for no benefit during dependency # resolution - dist = make_dist(data['name'], info['version'], - summary=data.get('summary', - 'Placeholder for summary'), + dist = make_dist(data['name'], + info['version'], + summary=data.get('summary', 'Placeholder for summary'), scheme=self.scheme) md = dist.metadata md.source_url = info['url'] @@ -928,11 +928,13 @@ def _get_project(self, name): result['urls'].setdefault(dist.version, set()).add(info['url']) return result + class DistPathLocator(Locator): """ This locator finds installed distributions in a path. It can be useful for adding to an :class:`AggregatingLocator`. """ + def __init__(self, distpath, **kwargs): """ Initialise an instance.
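As a side note on the DirectoryLocator hunks above: every filename that should_include() accepts is turned into a file:// URL before being fed back through the normal download-info path. A minimal standalone sketch of that round trip, using Python 3 stdlib imports in place of the vendored compat shims; the directory and archive name are illustrative, not taken from the diff:

    import os
    from urllib.parse import urlunparse
    from urllib.request import pathname2url

    # Hypothetical archive; DirectoryLocator builds the same kind of URL
    # for every file its should_include() filter accepts.
    fn = os.path.abspath(os.path.join('dists', 'example_pkg-1.0.tar.gz'))
    url = urlunparse(('file', '', pathname2url(fn), '', '', ''))
    print(url)  # e.g. file:///home/user/dists/example_pkg-1.0.tar.gz
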
@@ -950,8 +952,12 @@ def _get_project(self, name): else: result = { dist.version: dist, - 'urls': {dist.version: set([dist.source_url])}, - 'digests': {dist.version: set([None])} + 'urls': { + dist.version: set([dist.source_url]) + }, + 'digests': { + dist.version: set([None]) + } } return result @@ -960,6 +966,7 @@ class AggregatingLocator(Locator): """ This class allows you to chain and/or merge a list of locators. """ + def __init__(self, *locators, **kwargs): """ Initialise an instance. @@ -1046,17 +1053,14 @@ def get_distribution_names(self): # We use a legacy scheme simply because most of the dists on PyPI use legacy -# versions which don't conform to PEP 426 / PEP 440. +# versions which don't conform to PEP 440. default_locator = AggregatingLocator( - JSONLocator(), - SimpleScrapingLocator('https://pypi.org/simple/', - timeout=3.0), - scheme='legacy') + # JSONLocator(), # don't use as PEP 426 is withdrawn + SimpleScrapingLocator('https://pypi.org/simple/', timeout=3.0), + scheme='legacy') locate = default_locator.locate -NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*' - r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$') class DependencyFinder(object): """ @@ -1129,7 +1133,7 @@ def find_providers(self, reqt): :return: A set of distribution which can fulfill the requirement. """ matcher = self.get_matcher(reqt) - name = matcher.key # case-insensitive + name = matcher.key # case-insensitive result = set() provided = self.provided if name in provided: @@ -1171,8 +1175,7 @@ def try_to_replace(self, provider, other, problems): unmatched.add(s) if unmatched: # can't replace other with provider - problems.add(('cantreplace', provider, other, - frozenset(unmatched))) + problems.add(('cantreplace', provider, other, frozenset(unmatched))) result = False else: # can replace other with provider @@ -1225,8 +1228,7 @@ def find(self, requirement, meta_extras=None, prereleases=False): dist = odist = requirement logger.debug('passed %s as requirement', odist) else: - dist = odist = self.locator.locate(requirement, - prereleases=prereleases) + dist = odist = self.locator.locate(requirement, prereleases=prereleases) if dist is None: raise DistlibException('Unable to locate %r' % requirement) logger.debug('located %s', odist) @@ -1236,11 +1238,11 @@ def find(self, requirement, meta_extras=None, prereleases=False): install_dists = set([odist]) while todo: dist = todo.pop() - name = dist.key # case-insensitive + name = dist.key # case-insensitive if name not in self.dists_by_name: self.add_distribution(dist) else: - #import pdb; pdb.set_trace() + # import pdb; pdb.set_trace() other = self.dists_by_name[name] if other != dist: self.try_to_replace(dist, other, problems) @@ -1273,8 +1275,7 @@ def find(self, requirement, meta_extras=None, prereleases=False): providers.add(provider) if r in ireqts and dist in install_dists: install_dists.add(provider) - logger.debug('Adding %s to install_dists', - provider.name_and_version) + logger.debug('Adding %s to install_dists', provider.name_and_version) for p in providers: name = p.key if name not in self.dists_by_name: @@ -1289,7 +1290,6 @@ def find(self, requirement, meta_extras=None, prereleases=False): for dist in dists: dist.build_time_dependency = dist not in install_dists if dist.build_time_dependency: - logger.debug('%s is a build-time dependency only.', - dist.name_and_version) + logger.debug('%s is a build-time dependency only.', dist.name_and_version) logger.debug('find done for %s', odist) return dists, problems diff --git a/src/rez/vendor/distlib/manifest.py
b/src/rez/vendor/distlib/manifest.py index ca0fe442d..420dcf12e 100644 --- a/src/rez/vendor/distlib/manifest.py +++ b/src/rez/vendor/distlib/manifest.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2012-2013 Python Software Foundation. +# Copyright (C) 2012-2023 Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """ @@ -34,9 +34,11 @@ # _PYTHON_VERSION = sys.version_info[:2] + class Manifest(object): - """A list of files built by on exploring the filesystem and filtered by - applying various patterns to what we find there. + """ + A list of files built by exploring the filesystem and filtered by applying various + patterns to what we find there. """ def __init__(self, base=None): @@ -154,10 +156,7 @@ def process_directive(self, directive): elif action == 'exclude': for pattern in patterns: - found = self._exclude_pattern(pattern, anchor=True) - #if not found: - # logger.warning('no previously-included files ' - # 'found matching %r', pattern) + self._exclude_pattern(pattern, anchor=True) elif action == 'global-include': for pattern in patterns: @@ -167,11 +166,7 @@ def process_directive(self, directive): elif action == 'global-exclude': for pattern in patterns: - found = self._exclude_pattern(pattern, anchor=False) - #if not found: - # logger.warning('no previously-included files ' - # 'matching %r found anywhere in ' - # 'distribution', pattern) + self._exclude_pattern(pattern, anchor=False) elif action == 'recursive-include': for pattern in patterns: @@ -181,11 +176,7 @@ def process_directive(self, directive): elif action == 'recursive-exclude': for pattern in patterns: - found = self._exclude_pattern(pattern, prefix=thedir) - #if not found: - # logger.warning('no previously-included files ' - # 'matching %r found under directory %r', - # pattern, thedir) + self._exclude_pattern(pattern, prefix=thedir) elif action == 'graft': if not self._include_pattern(None, prefix=dirpattern): diff --git a/src/rez/vendor/distlib/markers.py b/src/rez/vendor/distlib/markers.py index ee1f3e236..3f5632be4 100644 --- a/src/rez/vendor/distlib/markers.py +++ b/src/rez/vendor/distlib/markers.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2012-2017 Vinay Sajip. +# Copyright (C) 2012-2023 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # @@ -13,23 +13,37 @@ # as ~= and === which aren't in Python, necessitating a different approach. import os +import re import sys import platform -import re -from .compat import python_implementation, urlparse, string_types +from .compat import string_types from .util import in_venv, parse_marker +from .version import LegacyVersion as LV __all__ = ['interpret'] +_VERSION_PATTERN = re.compile(r'((\d+(\.\d+)*\w*)|\'(\d+(\.\d+)*\w*)\'|\"(\d+(\.\d+)*\w*)\")') +_VERSION_MARKERS = {'python_version', 'python_full_version'} + + +def _is_version_marker(s): + return isinstance(s, string_types) and s in _VERSION_MARKERS + + def _is_literal(o): if not isinstance(o, string_types) or not o: return False return o[0] in '\'"' + +def _get_versions(s): + return {LV(m.groups()[0]) for m in _VERSION_PATTERN.finditer(s)} + + class Evaluator(object): """ - This class is used to evaluate marker expessions. + This class is used to evaluate marker expressions. 
""" operations = { @@ -37,10 +51,10 @@ class Evaluator(object): '===': lambda x, y: x == y, '~=': lambda x, y: x == y or x > y, '!=': lambda x, y: x != y, - '<': lambda x, y: x < y, - '<=': lambda x, y: x == y or x < y, - '>': lambda x, y: x > y, - '>=': lambda x, y: x == y or x > y, + '<': lambda x, y: x < y, + '<=': lambda x, y: x == y or x < y, + '>': lambda x, y: x > y, + '>=': lambda x, y: x == y or x > y, 'and': lambda x, y: x and y, 'or': lambda x, y: x or y, 'in': lambda x, y: x in y, @@ -71,10 +85,22 @@ def evaluate(self, expr, context): lhs = self.evaluate(elhs, context) rhs = self.evaluate(erhs, context) + if ((_is_version_marker(elhs) or _is_version_marker(erhs)) and + op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')): + lhs = LV(lhs) + rhs = LV(rhs) + elif _is_version_marker(elhs) and op in ('in', 'not in'): + lhs = LV(lhs) + rhs = _get_versions(rhs) result = self.operations[op](lhs, rhs) return result + +_DIGITS = re.compile(r'\d+\.\d+') + + def default_context(): + def format_full_version(info): version = '%s.%s.%s' % (info.major, info.minor, info.micro) kind = info.releaselevel @@ -89,6 +115,9 @@ def format_full_version(info): implementation_version = '0' implementation_name = '' + ppv = platform.python_version() + m = _DIGITS.match(ppv) + pv = m.group(0) result = { 'implementation_name': implementation_name, 'implementation_version': implementation_version, @@ -99,17 +128,19 @@ def format_full_version(info): 'platform_system': platform.system(), 'platform_version': platform.version(), 'platform_in_venv': str(in_venv()), - 'python_full_version': platform.python_version(), - 'python_version': platform.python_version()[:3], + 'python_full_version': ppv, + 'python_version': pv, 'sys_platform': sys.platform, } return result + DEFAULT_CONTEXT = default_context() del default_context evaluator = Evaluator() + def interpret(marker, execution_context=None): """ Interpret a marker and return a result depending on environment. diff --git a/src/rez/vendor/distlib/metadata.py b/src/rez/vendor/distlib/metadata.py index 2d61378e9..ce9a34b3e 100644 --- a/src/rez/vendor/distlib/metadata.py +++ b/src/rez/vendor/distlib/metadata.py @@ -5,7 +5,7 @@ # """Implementation of the Metadata for Python packages PEPs. -Supports all metadata formats (1.0, 1.1, 1.2, and 2.0 experimental). +Supports all metadata formats (1.0, 1.1, 1.2, 1.3/2.1 and 2.2). """ from __future__ import unicode_literals @@ -15,7 +15,6 @@ import logging import re - from . 
import DistlibException, __version__ from .compat import StringIO, string_types, text_type from .markers import interpret @@ -40,6 +39,7 @@ class MetadataUnrecognizedVersionError(DistlibException): class MetadataInvalidError(DistlibException): """A metadata value is invalid""" + # public API of this module __all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION'] @@ -52,52 +52,42 @@ class MetadataInvalidError(DistlibException): _LINE_PREFIX_1_2 = re.compile('\n \\|') _LINE_PREFIX_PRE_1_2 = re.compile('\n ') -_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', - 'Summary', 'Description', - 'Keywords', 'Home-page', 'Author', 'Author-email', - 'License') - -_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', - 'Supported-Platform', 'Summary', 'Description', - 'Keywords', 'Home-page', 'Author', 'Author-email', - 'License', 'Classifier', 'Download-URL', 'Obsoletes', +_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Summary', 'Description', 'Keywords', 'Home-page', + 'Author', 'Author-email', 'License') + +_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', 'License', 'Classifier', 'Download-URL', 'Obsoletes', 'Provides', 'Requires') -_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier', - 'Download-URL') +_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier', 'Download-URL') -_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', - 'Supported-Platform', 'Summary', 'Description', - 'Keywords', 'Home-page', 'Author', 'Author-email', - 'Maintainer', 'Maintainer-email', 'License', - 'Classifier', 'Download-URL', 'Obsoletes-Dist', - 'Project-URL', 'Provides-Dist', 'Requires-Dist', +_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License', + 'Classifier', 'Download-URL', 'Obsoletes-Dist', 'Project-URL', 'Provides-Dist', 'Requires-Dist', 'Requires-Python', 'Requires-External') -_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python', - 'Obsoletes-Dist', 'Requires-External', 'Maintainer', - 'Maintainer-email', 'Project-URL') +_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python', 'Obsoletes-Dist', 'Requires-External', + 'Maintainer', 'Maintainer-email', 'Project-URL') -_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', - 'Supported-Platform', 'Summary', 'Description', - 'Keywords', 'Home-page', 'Author', 'Author-email', - 'Maintainer', 'Maintainer-email', 'License', - 'Classifier', 'Download-URL', 'Obsoletes-Dist', - 'Project-URL', 'Provides-Dist', 'Requires-Dist', - 'Requires-Python', 'Requires-External', 'Private-Version', - 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension', - 'Provides-Extra') +_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform', 'Supported-Platform', 'Summary', 'Description', + 'Keywords', 'Home-page', 'Author', 'Author-email', 'Maintainer', 'Maintainer-email', 'License', + 'Classifier', 'Download-URL', 'Obsoletes-Dist', 'Project-URL', 'Provides-Dist', 'Requires-Dist', + 'Requires-Python', 'Requires-External', 'Private-Version', 'Obsoleted-By', 'Setup-Requires-Dist', + 'Extension', 'Provides-Extra') -_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By', - 'Setup-Requires-Dist', 'Extension') +_426_MARKERS = ('Private-Version', 'Provides-Extra', 
'Obsoleted-By', 'Setup-Requires-Dist', 'Extension') # See issue #106: Sometimes 'Requires' and 'Provides' occur wrongly in # the metadata. Include them in the tuple literal below to allow them # (for now). -_566_FIELDS = _426_FIELDS + ('Description-Content-Type', - 'Requires', 'Provides') +# Ditto for Obsoletes - see issue #140. +_566_FIELDS = _426_FIELDS + ('Description-Content-Type', 'Requires', 'Provides', 'Obsoletes') -_566_MARKERS = ('Description-Content-Type',) +_566_MARKERS = ('Description-Content-Type', ) + +_643_MARKERS = ('Dynamic', 'License-File') + +_643_FIELDS = _566_FIELDS + _643_MARKERS _ALL_FIELDS = set() _ALL_FIELDS.update(_241_FIELDS) @@ -105,6 +95,7 @@ class MetadataInvalidError(DistlibException): _ALL_FIELDS.update(_345_FIELDS) _ALL_FIELDS.update(_426_FIELDS) _ALL_FIELDS.update(_566_FIELDS) +_ALL_FIELDS.update(_643_FIELDS) EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''') @@ -117,27 +108,24 @@ def _version2fieldlist(version): elif version == '1.2': return _345_FIELDS elif version in ('1.3', '2.1'): - return _345_FIELDS + _566_FIELDS + # avoid adding field names if already there + return _345_FIELDS + tuple(f for f in _566_FIELDS if f not in _345_FIELDS) elif version == '2.0': - return _426_FIELDS + raise ValueError('Metadata 2.0 is withdrawn and not supported') + # return _426_FIELDS + elif version == '2.2': + return _643_FIELDS raise MetadataUnrecognizedVersionError(version) def _best_version(fields): """Detect the best version depending on the fields used.""" - def _has_marker(keys, markers): - for marker in markers: - if marker in keys: - return True - return False - keys = [] - for key, value in fields.items(): - if value in ([], 'UNKNOWN', None): - continue - keys.append(key) + def _has_marker(keys, markers): + return any(marker in keys for marker in markers) - possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.0', '2.1'] + keys = [key for key, value in fields.items() if value not in ([], 'UNKNOWN', None)] + possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.1', '2.2'] # 2.0 removed # first let's try to see if a field is not part of one of the version for key in keys: @@ -157,13 +145,16 @@ def _has_marker(keys, markers): if key != 'Description': # In 2.1, description allowed after headers possible_versions.remove('2.1') logger.debug('Removed 2.1 due to %s', key) - if key not in _426_FIELDS and '2.0' in possible_versions: - possible_versions.remove('2.0') - logger.debug('Removed 2.0 due to %s', key) + if key not in _643_FIELDS and '2.2' in possible_versions: + possible_versions.remove('2.2') + logger.debug('Removed 2.2 due to %s', key) + # if key not in _426_FIELDS and '2.0' in possible_versions: + # possible_versions.remove('2.0') + # logger.debug('Removed 2.0 due to %s', key) # possible_version contains qualified versions if len(possible_versions) == 1: - return possible_versions[0] # found ! + return possible_versions[0] # found ! 
elif len(possible_versions) == 0: logger.debug('Out of options - unknown metadata set: %s', fields) raise MetadataConflictError('Unknown metadata set') @@ -172,16 +163,18 @@ def _has_marker(keys, markers): is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS) is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS) is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS) - is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) - if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_0) > 1: - raise MetadataConflictError('You used incompatible 1.1/1.2/2.0/2.1 fields') + # is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS) + is_2_2 = '2.2' in possible_versions and _has_marker(keys, _643_MARKERS) + if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_2) > 1: + raise MetadataConflictError('You used incompatible 1.1/1.2/2.1/2.2 fields') - # we have the choice, 1.0, or 1.2, or 2.0 + # we have the choice, 1.0, or 1.2, 2.1 or 2.2 # - 1.0 has a broken Summary field but works with all tools # - 1.1 is to avoid # - 1.2 fixes Summary but has little adoption - # - 2.0 adds more features and is very new - if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_0: + # - 2.1 adds more features + # - 2.2 is the latest + if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_2: # we couldn't find any specific marker if PKG_INFO_PREFERRED_VERSION in possible_versions: return PKG_INFO_PREFERRED_VERSION @@ -191,53 +184,26 @@ def _has_marker(keys, markers): return '1.2' if is_2_1: return '2.1' + # if is_2_2: + # return '2.2' + + return '2.2' + - return '2.0' - -_ATTR2FIELD = { - 'metadata_version': 'Metadata-Version', - 'name': 'Name', - 'version': 'Version', - 'platform': 'Platform', - 'supported_platform': 'Supported-Platform', - 'summary': 'Summary', - 'description': 'Description', - 'keywords': 'Keywords', - 'home_page': 'Home-page', - 'author': 'Author', - 'author_email': 'Author-email', - 'maintainer': 'Maintainer', - 'maintainer_email': 'Maintainer-email', - 'license': 'License', - 'classifier': 'Classifier', - 'download_url': 'Download-URL', - 'obsoletes_dist': 'Obsoletes-Dist', - 'provides_dist': 'Provides-Dist', - 'requires_dist': 'Requires-Dist', - 'setup_requires_dist': 'Setup-Requires-Dist', - 'requires_python': 'Requires-Python', - 'requires_external': 'Requires-External', - 'requires': 'Requires', - 'provides': 'Provides', - 'obsoletes': 'Obsoletes', - 'project_url': 'Project-URL', - 'private_version': 'Private-Version', - 'obsoleted_by': 'Obsoleted-By', - 'extension': 'Extension', - 'provides_extra': 'Provides-Extra', -} +# This follows the rules about transforming keys as described in +# https://www.python.org/dev/peps/pep-0566/#id17 +_ATTR2FIELD = {name.lower().replace("-", "_"): name for name in _ALL_FIELDS} +_FIELD2ATTR = {field: attr for attr, field in _ATTR2FIELD.items()} _PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist') -_VERSIONS_FIELDS = ('Requires-Python',) -_VERSION_FIELDS = ('Version',) -_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes', - 'Requires', 'Provides', 'Obsoletes-Dist', - 'Provides-Dist', 'Requires-Dist', 'Requires-External', - 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist', - 'Provides-Extra', 'Extension') -_LISTTUPLEFIELDS = ('Project-URL',) +_VERSIONS_FIELDS = ('Requires-Python', ) +_VERSION_FIELDS = ('Version', ) +_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes', 'Requires', 'Provides', 'Obsoletes-Dist', 'Provides-Dist', + 'Requires-Dist', 
'Requires-External', 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist', + 'Provides-Extra', 'Extension', 'License-File') +_LISTTUPLEFIELDS = ('Project-URL', ) -_ELEMENTSFIELD = ('Keywords',) +_ELEMENTSFIELD = ('Keywords', ) _UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description') @@ -262,17 +228,17 @@ def _get_name_and_version(name, version, for_filename=False): class LegacyMetadata(object): """The legacy metadata of a release. - Supports versions 1.0, 1.1 and 1.2 (auto-detected). You can + Supports versions 1.0, 1.1, 1.2, 2.0 and 1.3/2.1 (auto-detected). You can instantiate the class with one of these arguments (or none): - *path*, the path to a metadata file - *fileobj* give a file-like object with metadata as content - *mapping* is a dict-like object - *scheme* is a version scheme name """ + # TODO document the mapping API and UNKNOWN default key - def __init__(self, path=None, fileobj=None, mapping=None, - scheme='default'): + def __init__(self, path=None, fileobj=None, mapping=None, scheme='default'): if [path, fileobj, mapping].count(None) < 2: raise TypeError('path, fileobj and mapping are exclusive') self._fields = {} @@ -307,8 +273,7 @@ def __delitem__(self, name): raise KeyError(name) def __contains__(self, name): - return (name in self._fields or - self._convert_name(name) in self._fields) + return (name in self._fields or self._convert_name(name) in self._fields) def _convert_name(self, name): if name in _ALL_FIELDS: @@ -336,12 +301,12 @@ def __getattr__(self, name): # Public API # -# dependencies = property(_get_dependencies, _set_dependencies) - def get_fullname(self, filesafe=False): - """Return the distribution name with version. + """ + Return the distribution name with version. - If filesafe is true, return a filename-escaped form.""" + If filesafe is true, return a filename-escaped form. + """ return _get_name_and_version(self['Name'], self['Version'], filesafe) def is_field(self, name): @@ -381,6 +346,11 @@ def read_file(self, fileob): value = msg[field] if value is not None and value != 'UNKNOWN': self.set(field, value) + + # PEP 566 specifies that the body be used for the description, if + # available + body = msg.get_payload() + self["Description"] = body if body else self["Description"] # logger.debug('Attempting to set metadata for %s', self) # self.set_metadata_version() @@ -427,6 +397,7 @@ def update(self, other=None, **kwargs): Keys that don't match a metadata field or that have an empty value are dropped. 
""" + def _set(key, value): if key in _ATTR2FIELD and value: self.set(self._convert_name(key), value) @@ -449,14 +420,12 @@ def set(self, name, value): """Control then set a metadata field.""" name = self._convert_name(name) - if ((name in _ELEMENTSFIELD or name == 'Platform') and - not isinstance(value, (list, tuple))): + if ((name in _ELEMENTSFIELD or name == 'Platform') and not isinstance(value, (list, tuple))): if isinstance(value, string_types): value = [v.strip() for v in value.split(',')] else: value = [] - elif (name in _LISTFIELDS and - not isinstance(value, (list, tuple))): + elif (name in _LISTFIELDS and not isinstance(value, (list, tuple))): if isinstance(value, string_types): value = [value] else: @@ -470,18 +439,14 @@ def set(self, name, value): for v in value: # check that the values are valid if not scheme.is_valid_matcher(v.split(';')[0]): - logger.warning( - "'%s': '%s' is not valid (field '%s')", - project_name, v, name) + logger.warning("'%s': '%s' is not valid (field '%s')", project_name, v, name) # FIXME this rejects UNKNOWN, is that right? elif name in _VERSIONS_FIELDS and value is not None: if not scheme.is_valid_constraint_list(value): - logger.warning("'%s': '%s' is not a valid version (field '%s')", - project_name, value, name) + logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name) elif name in _VERSION_FIELDS and value is not None: if not scheme.is_valid_version(value): - logger.warning("'%s': '%s' is not a valid version (field '%s')", - project_name, value, name) + logger.warning("'%s': '%s' is not a valid version (field '%s')", project_name, value, name) if name in _UNICODEFIELDS: if name == 'Description': @@ -551,10 +516,8 @@ def are_valid_constraints(value): return True for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints), - (_VERSIONS_FIELDS, - scheme.is_valid_constraint_list), - (_VERSION_FIELDS, - scheme.is_valid_version)): + (_VERSIONS_FIELDS, scheme.is_valid_constraint_list), (_VERSION_FIELDS, + scheme.is_valid_version)): for field in fields: value = self.get(field, None) if value is not None and not controller(value): @@ -567,57 +530,21 @@ def todict(self, skip_missing=False): Field names will be converted to use the underscore-lowercase style instead of hyphen-mixed case (i.e. home_page instead of Home-page). + This is as per https://www.python.org/dev/peps/pep-0566/#id17. 
""" self.set_metadata_version() - mapping_1_0 = ( - ('metadata_version', 'Metadata-Version'), - ('name', 'Name'), - ('version', 'Version'), - ('summary', 'Summary'), - ('home_page', 'Home-page'), - ('author', 'Author'), - ('author_email', 'Author-email'), - ('license', 'License'), - ('description', 'Description'), - ('keywords', 'Keywords'), - ('platform', 'Platform'), - ('classifiers', 'Classifier'), - ('download_url', 'Download-URL'), - ) + fields = _version2fieldlist(self['Metadata-Version']) data = {} - for key, field_name in mapping_1_0: + + for field_name in fields: if not skip_missing or field_name in self._fields: - data[key] = self[field_name] - - if self['Metadata-Version'] == '1.2': - mapping_1_2 = ( - ('requires_dist', 'Requires-Dist'), - ('requires_python', 'Requires-Python'), - ('requires_external', 'Requires-External'), - ('provides_dist', 'Provides-Dist'), - ('obsoletes_dist', 'Obsoletes-Dist'), - ('project_url', 'Project-URL'), - ('maintainer', 'Maintainer'), - ('maintainer_email', 'Maintainer-email'), - ) - for key, field_name in mapping_1_2: - if not skip_missing or field_name in self._fields: - if key != 'project_url': - data[key] = self[field_name] - else: - data[key] = [','.join(u) for u in self[field_name]] - - elif self['Metadata-Version'] == '1.1': - mapping_1_1 = ( - ('provides', 'Provides'), - ('requires', 'Requires'), - ('obsoletes', 'Obsoletes'), - ) - for key, field_name in mapping_1_1: - if not skip_missing or field_name in self._fields: + key = _FIELD2ATTR[field_name] + if key != 'project_url': data[key] = self[field_name] + else: + data[key] = [','.join(u) for u in self[field_name]] return data @@ -646,8 +573,7 @@ def items(self): return [(key, self[key]) for key in self.keys()] def __repr__(self): - return '<%s %s %s>' % (self.__class__.__name__, self.name, - self.version) + return '<%s %s %s>' % (self.__class__.__name__, self.name, self.version) METADATA_FILENAME = 'pydist.json' @@ -657,7 +583,7 @@ def __repr__(self): class Metadata(object): """ - The metadata of a release. This implementation uses 2.0 (JSON) + The metadata of a release. This implementation uses 2.1 metadata where possible. If not possible, it wraps a LegacyMetadata instance which handles the key-value metadata format. 
""" @@ -666,6 +592,8 @@ class Metadata(object): NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I) + FIELDNAME_MATCHER = re.compile('^[A-Z]([0-9A-Z-]*[0-9A-Z])?$', re.I) + VERSION_MATCHER = PEP440_VERSION_RE SUMMARY_MATCHER = re.compile('.{1,2047}') @@ -677,7 +605,7 @@ class Metadata(object): MANDATORY_KEYS = { 'name': (), 'version': (), - 'summary': ('legacy',), + 'summary': ('legacy', ), } INDEX_KEYS = ('name version license summary description author ' @@ -690,21 +618,21 @@ class Metadata(object): SYNTAX_VALIDATORS = { 'metadata_version': (METADATA_VERSION_MATCHER, ()), - 'name': (NAME_MATCHER, ('legacy',)), - 'version': (VERSION_MATCHER, ('legacy',)), - 'summary': (SUMMARY_MATCHER, ('legacy',)), + 'name': (NAME_MATCHER, ('legacy', )), + 'version': (VERSION_MATCHER, ('legacy', )), + 'summary': (SUMMARY_MATCHER, ('legacy', )), + 'dynamic': (FIELDNAME_MATCHER, ('legacy', )), } __slots__ = ('_legacy', '_data', 'scheme') - def __init__(self, path=None, fileobj=None, mapping=None, - scheme='default'): + def __init__(self, path=None, fileobj=None, mapping=None, scheme='default'): if [path, fileobj, mapping].count(None) < 2: raise TypeError('path, fileobj and mapping are exclusive') self._legacy = None self._data = None self.scheme = scheme - #import pdb; pdb.set_trace() + # import pdb; pdb.set_trace() if mapping is not None: try: self._validate_mapping(mapping, scheme) @@ -738,8 +666,7 @@ def __init__(self, path=None, fileobj=None, mapping=None, # The ValueError comes from the json.load - if that # succeeds and we get a validation error, we want # that to propagate - self._legacy = LegacyMetadata(fileobj=StringIO(data), - scheme=scheme) + self._legacy = LegacyMetadata(fileobj=StringIO(data), scheme=scheme) self.validate() common_keys = set(('name', 'version', 'license', 'keywords', 'summary')) @@ -777,8 +704,7 @@ def __getattribute__(self, key): result = self._legacy.get(lk) else: value = None if maker is None else maker() - if key not in ('commands', 'exports', 'modules', 'namespaces', - 'classifiers'): + if key not in ('commands', 'exports', 'modules', 'namespaces', 'classifiers'): result = self._data.get(key, value) else: # special cases for PEP 459 @@ -815,8 +741,7 @@ def _validate_value(self, key, value, scheme=None): m = pattern.match(value) if not m: raise MetadataInvalidError("'%s' is an invalid value for " - "the '%s' property" % (value, - key)) + "the '%s' property" % (value, key)) def __setattr__(self, key, value): self._validate_value(key, value) @@ -828,8 +753,7 @@ def __setattr__(self, key, value): if lk is None: raise NotImplementedError self._legacy[lk] = value - elif key not in ('commands', 'exports', 'modules', 'namespaces', - 'classifiers'): + elif key not in ('commands', 'exports', 'modules', 'namespaces', 'classifiers'): self._data[key] = value else: # special cases for PEP 459 @@ -917,8 +841,7 @@ def get_requirements(self, reqts, extras=None, env=None): # A recursive call, but it should terminate since 'test' # has been removed from the extras reqts = self._data.get('%s_requires' % key, []) - result.extend(self.get_requirements(reqts, extras=extras, - env=env)) + result.extend(self.get_requirements(reqts, extras=extras, env=env)) return result @property @@ -959,8 +882,7 @@ def validate(self): if self._legacy: missing, warnings = self._legacy.check(True) if missing or warnings: - logger.warning('Metadata: missing: %s, warnings: %s', - missing, warnings) + logger.warning('Metadata: missing: %s, warnings: %s', missing, warnings) else: 
self._validate_mapping(self._data, self.scheme) @@ -977,9 +899,8 @@ def _from_legacy(self): 'metadata_version': self.METADATA_VERSION, 'generator': self.GENERATOR, } - lmd = self._legacy.todict(True) # skip missing ones - for k in ('name', 'version', 'license', 'summary', 'description', - 'classifier'): + lmd = self._legacy.todict(True) # skip missing ones + for k in ('name', 'version', 'license', 'summary', 'description', 'classifier'): if k in lmd: if k == 'classifier': nk = 'classifiers' @@ -990,26 +911,30 @@ def _from_legacy(self): if kw == ['']: kw = [] result['keywords'] = kw - keys = (('requires_dist', 'run_requires'), - ('setup_requires_dist', 'build_requires')) + keys = (('requires_dist', 'run_requires'), ('setup_requires_dist', 'build_requires')) for ok, nk in keys: if ok in lmd and lmd[ok]: result[nk] = [{'requires': lmd[ok]}] result['provides'] = self.provides - author = {} - maintainer = {} + # author = {} + # maintainer = {} return result LEGACY_MAPPING = { 'name': 'Name', 'version': 'Version', - 'license': 'License', + ('extensions', 'python.details', 'license'): 'License', 'summary': 'Summary', 'description': 'Description', - 'classifiers': 'Classifier', + ('extensions', 'python.project', 'project_urls', 'Home'): 'Home-page', + ('extensions', 'python.project', 'contacts', 0, 'name'): 'Author', + ('extensions', 'python.project', 'contacts', 0, 'email'): 'Author-email', + 'source_url': 'Download-URL', + ('extensions', 'python.details', 'classifiers'): 'Classifier', } def _to_legacy(self): + def process_entries(entries): reqts = set() for e in entries: @@ -1034,16 +959,29 @@ def process_entries(entries): assert self._data and not self._legacy result = LegacyMetadata() nmd = self._data + # import pdb; pdb.set_trace() for nk, ok in self.LEGACY_MAPPING.items(): - if nk in nmd: - result[ok] = nmd[nk] + if not isinstance(nk, tuple): + if nk in nmd: + result[ok] = nmd[nk] + else: + d = nmd + found = True + for k in nk: + try: + d = d[k] + except (KeyError, IndexError): + found = False + break + if found: + result[ok] = d r1 = process_entries(self.run_requires + self.meta_requires) r2 = process_entries(self.build_requires + self.dev_requires) if self.extras: result['Provides-Extra'] = sorted(self.extras) result['Requires-Dist'] = sorted(r1) result['Setup-Requires-Dist'] = sorted(r2) - # TODO: other fields such as contacts + # TODO: any other fields wanted return result def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True): @@ -1065,12 +1003,10 @@ def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True): else: d = self._data if fileobj: - json.dump(d, fileobj, ensure_ascii=True, indent=2, - sort_keys=True) + json.dump(d, fileobj, ensure_ascii=True, indent=2, sort_keys=True) else: with codecs.open(path, 'w', 'utf-8') as f: - json.dump(d, f, ensure_ascii=True, indent=2, - sort_keys=True) + json.dump(d, f, ensure_ascii=True, indent=2, sort_keys=True) def add_requirements(self, requirements): if self._legacy: @@ -1083,7 +1019,7 @@ def add_requirements(self, requirements): always = entry break if always is None: - always = { 'requires': requirements } + always = {'requires': requirements} run_requires.insert(0, always) else: rset = set(always['requires']) | set(requirements) @@ -1092,5 +1028,4 @@ def add_requirements(self, requirements): def __repr__(self): name = self.name or '(no name)' version = self.version or 'no version' - return '<%s %s %s (%s)>' % (self.__class__.__name__, - self.metadata_version, name, version) + return '<%s %s %s (%s)>' % 
(self.__class__.__name__, self.metadata_version, name, version) diff --git a/src/rez/vendor/distlib/resources.py b/src/rez/vendor/distlib/resources.py index 18840167a..fef52aa10 100644 --- a/src/rez/vendor/distlib/resources.py +++ b/src/rez/vendor/distlib/resources.py @@ -11,13 +11,12 @@ import logging import os import pkgutil -import shutil import sys import types import zipimport from . import DistlibException -from .util import cached_property, get_cache_base, path_to_cache_dir, Cache +from .util import cached_property, get_cache_base, Cache logger = logging.getLogger(__name__) @@ -283,6 +282,7 @@ def _is_directory(self, path): result = False return result + _finder_registry = { type(None): ResourceFinder, zipimport.zipimporter: ZipResourceFinder @@ -296,6 +296,8 @@ def _is_directory(self, path): import _frozen_importlib as _fi _finder_registry[_fi.SourceFileLoader] = ResourceFinder _finder_registry[_fi.FileFinder] = ResourceFinder + # See issue #146 + _finder_registry[_fi.SourcelessFileLoader] = ResourceFinder del _fi except (ImportError, AttributeError): pass @@ -304,6 +306,7 @@ def _is_directory(self, path): def register_finder(loader, finder_maker): _finder_registry[type(loader)] = finder_maker + _finder_cache = {} diff --git a/src/rez/vendor/distlib/scripts.py b/src/rez/vendor/distlib/scripts.py index 5965e241d..b1fc705b7 100644 --- a/src/rez/vendor/distlib/scripts.py +++ b/src/rez/vendor/distlib/scripts.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2013-2015 Vinay Sajip. +# Copyright (C) 2013-2023 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # @@ -10,11 +10,12 @@ import re import struct import sys +import time +from zipfile import ZipInfo from .compat import sysconfig, detect_encoding, ZipFile from .resources import finder -from .util import (FileOperator, get_export_entry, convert_path, - get_executable, in_venv) +from .util import (FileOperator, get_export_entry, convert_path, get_executable, get_platform, in_venv) logger = logging.getLogger(__name__) @@ -47,8 +48,27 @@ sys.exit(%(func)s()) ''' +# Pre-fetch the contents of all executable wrapper stubs. +# This is to address https://github.com/pypa/pip/issues/12666. +# When updating pip, we rename the old pip in place before installing the +# new version. If we try to fetch a wrapper *after* that rename, the finder +# machinery will be confused as the package is no longer available at the +# location where it was imported from. So we load everything into memory in +# advance. -def _enquote_executable(executable): +if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): + # Issue 31: don't hardcode an absolute package name, but + # determine it relative to the current package + DISTLIB_PACKAGE = __name__.rsplit('.', 1)[0] + + WRAPPERS = { + r.name: r.bytes + for r in finder(DISTLIB_PACKAGE).iterator("") + if r.name.endswith(".exe") + } + + +def enquote_executable(executable): if ' ' in executable: # make sure we quote only the executable in case of env # for example /usr/bin/env "/dir with spaces/bin/jython" @@ -64,6 +84,10 @@ def _enquote_executable(executable): return executable +# Keep the old name around (for now), as there is at least one project using it! 
+_enquote_executable = enquote_executable + + class ScriptMaker(object): """ A class to copy or create scripts from source scripts or callable @@ -73,21 +97,19 @@ class ScriptMaker(object): executable = None # for shebangs - def __init__(self, source_dir, target_dir, add_launchers=True, - dry_run=False, fileop=None): + def __init__(self, source_dir, target_dir, add_launchers=True, dry_run=False, fileop=None): self.source_dir = source_dir self.target_dir = target_dir self.add_launchers = add_launchers self.force = False self.clobber = False # It only makes sense to set mode bits on POSIX. - self.set_mode = (os.name == 'posix') or (os.name == 'java' and - os._name == 'posix') + self.set_mode = (os.name == 'posix') or (os.name == 'java' and os._name == 'posix') self.variants = set(('', 'X.Y')) self._fileop = fileop or FileOperator(dry_run) - self._is_nt = os.name == 'nt' or ( - os.name == 'java' and os._name == 'nt') + self._is_nt = os.name == 'nt' or (os.name == 'java' and os._name == 'nt') + self.version_info = sys.version_info def _get_alternate_executable(self, executable, options): if options.get('gui', False) and self._is_nt: # pragma: no cover @@ -97,6 +119,7 @@ def _get_alternate_executable(self, executable, options): return executable if sys.platform.startswith('java'): # pragma: no cover + def _is_shell(self, executable): """ Determine if the specified executable is a script @@ -134,6 +157,12 @@ def _build_shebang(self, executable, post_interp): """ if os.name != 'posix': simple_shebang = True + elif getattr(sys, "cross_compiling", False): + # In a cross-compiling environment, the shebang will likely be a + # script; this *must* be invoked with the "safe" version of the + # shebang, or else using os.exec() to run the entry script will + # fail, raising "OSError 8 [Errno 8] Exec format error". + simple_shebang = False else: # Add 3 for '#!' prefix and newline suffix. shebang_length = len(executable) + len(post_interp) + 3 @@ -141,49 +170,60 @@ def _build_shebang(self, executable, post_interp): max_shebang_length = 512 else: max_shebang_length = 127 - simple_shebang = ((b' ' not in executable) and - (shebang_length <= max_shebang_length)) + simple_shebang = ((b' ' not in executable) and (shebang_length <= max_shebang_length)) if simple_shebang: result = b'#!' 
+ executable + post_interp + b'\n' else: result = b'#!/bin/sh\n' result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n' - result += b"' '''" + result += b"' '''\n" return result def _get_shebang(self, encoding, post_interp=b'', options=None): enquote = True if self.executable: executable = self.executable - enquote = False # assume this will be taken care of + enquote = False # assume this will be taken care of elif not sysconfig.is_python_build(): executable = get_executable() elif in_venv(): # pragma: no cover - executable = os.path.join(sysconfig.get_path('scripts'), - 'python%s' % sysconfig.get_config_var('EXE')) + executable = os.path.join(sysconfig.get_path('scripts'), 'python%s' % sysconfig.get_config_var('EXE')) else: # pragma: no cover - executable = os.path.join( - sysconfig.get_config_var('BINDIR'), - 'python%s%s' % (sysconfig.get_config_var('VERSION'), - sysconfig.get_config_var('EXE'))) + if os.name == 'nt': + # for Python builds from source on Windows, no Python executables with + # a version suffix are created, so we use python.exe + executable = os.path.join(sysconfig.get_config_var('BINDIR'), + 'python%s' % (sysconfig.get_config_var('EXE'))) + else: + executable = os.path.join( + sysconfig.get_config_var('BINDIR'), + 'python%s%s' % (sysconfig.get_config_var('VERSION'), sysconfig.get_config_var('EXE'))) if options: executable = self._get_alternate_executable(executable, options) if sys.platform.startswith('java'): # pragma: no cover executable = self._fix_jython_executable(executable) - # Normalise case for Windows - executable = os.path.normcase(executable) + + # Normalise case for Windows - COMMENTED OUT + # executable = os.path.normcase(executable) + # N.B. The normalising operation above has been commented out: See + # issue #124. Although paths in Windows are generally case-insensitive, + # they aren't always. For example, a path containing a ẞ (which is a + # LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a + # LATIN SMALL LETTER SHARP S' - U+00DF). The two are not considered by + # Windows as equivalent in path names. + # If the user didn't specify an executable, it may be necessary to # cater for executable paths with spaces (not uncommon on Windows) if enquote: - executable = _enquote_executable(executable) + executable = enquote_executable(executable) # Issue #51: don't use fsencode, since we later try to # check that the shebang is decodable using utf-8. executable = executable.encode('utf-8') # in case of IronPython, play safe and enable frames support - if (sys.platform == 'cli' and '-X:Frames' not in post_interp - and '-X:FullFrames' not in post_interp): # pragma: no cover + if (sys.platform == 'cli' and '-X:Frames' not in post_interp and + '-X:FullFrames' not in post_interp): # pragma: no cover post_interp += b' -X:Frames' shebang = self._build_shebang(executable, post_interp) # Python parser starts to read a script using UTF-8 until @@ -194,8 +234,7 @@ def _get_shebang(self, encoding, post_interp=b'', options=None): try: shebang.decode('utf-8') except UnicodeDecodeError: # pragma: no cover - raise ValueError( - 'The shebang (%r) is not decodable from utf-8' % shebang) + raise ValueError('The shebang (%r) is not decodable from utf-8' % shebang) # If the script is encoded to a custom encoding (use a # #coding:xxx cookie), the shebang has to be decodable from # the script encoding too. 
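For context on the _build_shebang hunks above, here is a standalone sketch (not the vendored method) of the two shebang forms it chooses between; the interpreter paths are illustrative and the POSIX length limit is simplified to the common 127-byte case:

    # Sketch, assuming a POSIX-style 127-byte shebang limit.
    def build_shebang(executable: bytes, post_interp: bytes = b'') -> bytes:
        if b' ' not in executable and len(executable) + len(post_interp) + 3 <= 127:
            # Simple form: short, space-free interpreter path.
            return b'#!' + executable + post_interp + b'\n'
        # "Safe" /bin/sh trampoline for long paths or paths with spaces.
        # Note the trailing newline after "' '''", matching the fix in the
        # hunk above, so the trampoline does not run into the script body.
        return (b'#!/bin/sh\n'
                b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
                b"' '''\n")

    print(build_shebang(b'/usr/bin/python3'))
    print(build_shebang(b'/opt/my python/bin/python3'))

The trampoline works because sh execs the interpreter on line two before ever reaching the script body, while Python parses lines two and three as a harmless triple-quoted string.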
@@ -203,15 +242,13 @@ def _get_shebang(self, encoding, post_interp=b'', options=None): try: shebang.decode(encoding) except UnicodeDecodeError: # pragma: no cover - raise ValueError( - 'The shebang (%r) is not decodable ' - 'from the script encoding (%r)' % (shebang, encoding)) + raise ValueError('The shebang (%r) is not decodable ' + 'from the script encoding (%r)' % (shebang, encoding)) return shebang def _get_script_text(self, entry): - return self.script_template % dict(module=entry.prefix, - import_name=entry.suffix.split('.')[0], - func=entry.suffix) + return self.script_template % dict( + module=entry.prefix, import_name=entry.suffix.split('.')[0], func=entry.suffix) manifest = _DEFAULT_MANIFEST @@ -221,9 +258,6 @@ def get_manifest(self, exename): def _write_script(self, names, shebang, script_bytes, filenames, ext): use_launcher = self.add_launchers and self._is_nt - linesep = os.linesep.encode('utf-8') - if not shebang.endswith(linesep): - shebang += linesep if not use_launcher: script_bytes = shebang + script_bytes else: # pragma: no cover @@ -233,7 +267,13 @@ def _write_script(self, names, shebang, script_bytes, filenames, ext): launcher = self._get_launcher('w') stream = BytesIO() with ZipFile(stream, 'w') as zf: - zf.writestr('__main__.py', script_bytes) + source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH') + if source_date_epoch: + date_time = time.gmtime(int(source_date_epoch))[:6] + zinfo = ZipInfo(filename='__main__.py', date_time=date_time) + zf.writestr(zinfo, script_bytes) + else: + zf.writestr('__main__.py', script_bytes) zip_data = stream.getvalue() script_bytes = launcher + shebang + zip_data for name in names: @@ -251,7 +291,7 @@ def _write_script(self, names, shebang, script_bytes, filenames, ext): 'use .deleteme logic') dfname = '%s.deleteme' % outname if os.path.exists(dfname): - os.remove(dfname) # Not allowed to fail here + os.remove(dfname) # Not allowed to fail here os.rename(outname, dfname) # nor here self._fileop.write_binary_file(outname, script_bytes) logger.debug('Able to replace executable using ' @@ -259,7 +299,7 @@ def _write_script(self, names, shebang, script_bytes, filenames, ext): try: os.remove(dfname) except Exception: - pass # still in use - ignore error + pass # still in use - ignore error else: if self._is_nt and not outname.endswith('.' 
+ ext): # pragma: no cover outname = '%s.%s' % (outname, ext) @@ -271,6 +311,18 @@ def _write_script(self, names, shebang, script_bytes, filenames, ext): self._fileop.set_executable_mode([outname]) filenames.append(outname) + variant_separator = '-' + + def get_script_filenames(self, name): + result = set() + if '' in self.variants: + result.add(name) + if 'X' in self.variants: + result.add('%s%s' % (name, self.version_info[0])) + if 'X.Y' in self.variants: + result.add('%s%s%s.%s' % (name, self.variant_separator, self.version_info[0], self.version_info[1])) + return result + def _make_script(self, entry, filenames, options=None): post_interp = b'' if options: @@ -280,14 +332,7 @@ def _make_script(self, entry, filenames, options=None): post_interp = args.encode('utf-8') shebang = self._get_shebang('utf-8', post_interp, options=options) script = self._get_script_text(entry).encode('utf-8') - name = entry.name - scriptnames = set() - if '' in self.variants: - scriptnames.add(name) - if 'X' in self.variants: - scriptnames.add('%s%s' % (name, sys.version[0])) - if 'X.Y' in self.variants: - scriptnames.add('%s-%s' % (name, sys.version[:3])) + scriptnames = self.get_script_filenames(entry.name) if options and options.get('gui', False): ext = 'pyw' else: @@ -314,8 +359,7 @@ def _copy_script(self, script, filenames): else: first_line = f.readline() if not first_line: # pragma: no cover - logger.warning('%s: %s is an empty file (skipping)', - self.get_command_name(), script) + logger.warning('%s is an empty file (skipping)', script) return match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n')) @@ -331,8 +375,7 @@ def _copy_script(self, script, filenames): self._fileop.set_executable_mode([outname]) filenames.append(outname) else: - logger.info('copying and adjusting %s -> %s', script, - self.target_dir) + logger.info('copying and adjusting %s -> %s', script, self.target_dir) if not self._fileop.dry_run: encoding, lines = detect_encoding(f.readline) f.seek(0) @@ -359,16 +402,17 @@ def dry_run(self, value): # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/ def _get_launcher(self, kind): - if struct.calcsize('P') == 8: # 64-bit + if struct.calcsize('P') == 8: # 64-bit bits = '64' else: bits = '32' - name = '%s%s.exe' % (kind, bits) - # Issue 31: don't hardcode an absolute package name, but - # determine it relative to the current package - distlib_package = __name__.rsplit('.', 1)[0] - result = finder(distlib_package).find(name).bytes - return result + platform_suffix = '-arm' if get_platform() == 'win-arm64' else '' + name = '%s%s%s.exe' % (kind, bits, platform_suffix) + if name not in WRAPPERS: + msg = ('Unable to find resource %s in package %s' % + (name, DISTLIB_PACKAGE)) + raise ValueError(msg) + return WRAPPERS[name] # Public API follows diff --git a/src/rez/vendor/distlib/t32.exe b/src/rez/vendor/distlib/t32.exe index 5d5bce1f4..52154f0be 100644 Binary files a/src/rez/vendor/distlib/t32.exe and b/src/rez/vendor/distlib/t32.exe differ diff --git a/src/rez/vendor/distlib/t64-arm.exe b/src/rez/vendor/distlib/t64-arm.exe new file mode 100644 index 000000000..e1ab8f8f5 Binary files /dev/null and b/src/rez/vendor/distlib/t64-arm.exe differ diff --git a/src/rez/vendor/distlib/t64.exe b/src/rez/vendor/distlib/t64.exe index 039ce441b..e8bebdba6 100644 Binary files a/src/rez/vendor/distlib/t64.exe and b/src/rez/vendor/distlib/t64.exe differ diff --git a/src/rez/vendor/distlib/util.py b/src/rez/vendor/distlib/util.py index ee647d310..0d5bd7a8b 100644 --- 
a/src/rez/vendor/distlib/util.py +++ b/src/rez/vendor/distlib/util.py @@ -1,5 +1,5 @@ # -# Copyright (C) 2012-2017 The Python Software Foundation. +# Copyright (C) 2012-2023 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # import codecs @@ -31,11 +31,9 @@ import time from . import DistlibException -from .compat import (string_types, text_type, shutil, raw_input, StringIO, - cache_from_source, urlopen, urljoin, httplib, xmlrpclib, - splittype, HTTPHandler, BaseConfigurator, valid_ident, - Container, configparser, URLError, ZipFile, fsdecode, - unquote, urlparse) +from .compat import (string_types, text_type, shutil, raw_input, StringIO, cache_from_source, urlopen, urljoin, httplib, + xmlrpclib, HTTPHandler, BaseConfigurator, valid_ident, Container, configparser, URLError, ZipFile, + fsdecode, unquote, urlparse) logger = logging.getLogger(__name__) @@ -62,6 +60,7 @@ def parse_marker(marker_string): interpreted as a literal string, and a string not contained in quotes is a variable (such as os_name). """ + def marker_var(remaining): # either identifier, or literal string m = IDENTIFIER.match(remaining) @@ -95,7 +94,7 @@ def marker_var(remaining): raise SyntaxError('unterminated string: %s' % s) parts.append(q) result = ''.join(parts) - remaining = remaining[1:].lstrip() # skip past closing quote + remaining = remaining[1:].lstrip() # skip past closing quote return result, remaining def marker_expr(remaining): @@ -215,13 +214,10 @@ def get_versions(ver_remaining): if not ver_remaining or ver_remaining[0] != ',': break ver_remaining = ver_remaining[1:].lstrip() - - # https://github.com/AcademySoftwareFoundation/rez/pull/1092 - # Some packages have a trailing comma which would break the matching + # Some packages have a trailing comma which would break things + # See issue #148 if not ver_remaining: break - # /end pull/1092 - m = COMPARE_OP.match(ver_remaining) if not m: raise SyntaxError('invalid constraint: %s' % ver_remaining) @@ -266,8 +262,7 @@ def get_versions(ver_remaining): rs = distname else: rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions])) - return Container(name=distname, extras=extras, constraints=versions, - marker=mark_expr, url=uri, requirement=rs) + return Container(name=distname, extras=extras, constraints=versions, marker=mark_expr, url=uri, requirement=rs) def get_resources_dests(resources_root, rules): @@ -307,16 +302,18 @@ def in_venv(): def get_executable(): -# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as -# changes to the stub launcher mean that sys.executable always points -# to the stub on OS X -# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__' -# in os.environ): -# result = os.environ['__PYVENV_LAUNCHER__'] -# else: -# result = sys.executable -# return result - result = os.path.normcase(sys.executable) + # The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as + # changes to the stub launcher mean that sys.executable always points + # to the stub on OS X + # if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__' + # in os.environ): + # result = os.environ['__PYVENV_LAUNCHER__'] + # else: + # result = sys.executable + # return result + # Avoid normcasing: see issue #143 + # result = os.path.normcase(sys.executable) + result = sys.executable if not isinstance(result, text_type): result = fsdecode(result) return result @@ -347,6 +344,7 @@ def extract_by_key(d, keys): result[key] = d[key] return result + def read_exports(stream): if sys.version_info[0] >= 3: # needs to be a text stream @@ 
-389,7 +387,7 @@ def read_stream(cp, stream): s = '%s = %s' % (name, value) entry = get_export_entry(s) assert entry is not None - #entry.dist = self + # entry.dist = self entries[name] = entry return result @@ -421,6 +419,7 @@ def tempdir(): finally: shutil.rmtree(td) + @contextlib.contextmanager def chdir(d): cwd = os.getcwd() @@ -442,19 +441,21 @@ def socket_timeout(seconds=15): class cached_property(object): + def __init__(self, func): self.func = func - #for attr in ('__name__', '__module__', '__doc__'): - # setattr(self, attr, getattr(func, attr, None)) + # for attr in ('__name__', '__module__', '__doc__'): + # setattr(self, attr, getattr(func, attr, None)) def __get__(self, obj, cls=None): if obj is None: return self value = self.func(obj) object.__setattr__(obj, self.func.__name__, value) - #obj.__dict__[self.func.__name__] = value = self.func(obj) + # obj.__dict__[self.func.__name__] = value = self.func(obj) return value + def convert_path(pathname): """Return 'pathname' as a name that will work on the native filesystem. @@ -483,6 +484,7 @@ def convert_path(pathname): class FileOperator(object): + def __init__(self, dry_run=False): self.dry_run = dry_run self.ensured = set() @@ -510,8 +512,7 @@ def newer(self, source, target): second will have the same "age". """ if not os.path.exists(source): - raise DistlibException("file '%r' does not exist" % - os.path.abspath(source)) + raise DistlibException("file '%r' does not exist" % os.path.abspath(source)) if not os.path.exists(target): return True @@ -599,8 +600,10 @@ def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_in diagpath = path[len(prefix):] compile_kwargs = {} if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'): - compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH - py_compile.compile(path, dpath, diagpath, True, **compile_kwargs) # raise error + if not isinstance(hashed_invalidation, py_compile.PycInvalidationMode): + hashed_invalidation = py_compile.PycInvalidationMode.CHECKED_HASH + compile_kwargs['invalidation_mode'] = hashed_invalidation + py_compile.compile(path, dpath, diagpath, True, **compile_kwargs) # raise error self.record_as_written(dpath) return dpath @@ -662,9 +665,10 @@ def rollback(self): assert flist == ['__pycache__'] sd = os.path.join(d, flist[0]) os.rmdir(sd) - os.rmdir(d) # should fail if non-empty + os.rmdir(d) # should fail if non-empty self._init_record() + def resolve(module_name, dotted_path): if module_name in sys.modules: mod = sys.modules[module_name] @@ -681,6 +685,7 @@ def resolve(module_name, dotted_path): class ExportEntry(object): + def __init__(self, name, prefix, suffix, flags): self.name = name self.prefix = prefix @@ -692,27 +697,26 @@ def value(self): return resolve(self.prefix, self.suffix) def __repr__(self): # pragma: no cover - return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix, - self.suffix, self.flags) + return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix, self.suffix, self.flags) def __eq__(self, other): if not isinstance(other, ExportEntry): result = False else: - result = (self.name == other.name and - self.prefix == other.prefix and - self.suffix == other.suffix and + result = (self.name == other.name and self.prefix == other.prefix and self.suffix == other.suffix and self.flags == other.flags) return result __hash__ = object.__hash__ -ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+) +ENTRY_RE = re.compile( + r'''(?P<name>([^\[]\S*)) \s*=\s*(?P<callable>(\w+)([:\.]\w+)*) - \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])? + \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])? ''', re.VERBOSE)
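For context on the pattern change just above: ENTRY_RE parses entry-point style export specifications of the form `name = package.module:attr [flag1,flag2]`; the new pattern is looser about the name token and accepts dashes in flag names. A minimal sketch of what it matches (the spec string below is purely illustrative, not taken from real metadata):

```python
import re

# Same pattern as the updated ENTRY_RE above, reproduced standalone.
ENTRY_RE = re.compile(r'''(?P<name>([^\[]\S*))
                      \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
                      \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
                      ''', re.VERBOSE)

# Illustrative specification string; a dashed flag like "with-gui" only
# matches with the new [\w-]+ flags pattern.
m = ENTRY_RE.search('mytool = mypkg.cli:main [with-gui]')
print(m.group('name'))      # 'mytool'
print(m.group('callable'))  # 'mypkg.cli:main'
print(m.group('flags'))     # 'with-gui'
```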
+ def get_export_entry(specification): m = ENTRY_RE.search(specification) if not m: @@ -785,7 +789,7 @@ def get_cache_base(suffix=None): return os.path.join(result, suffix) -def path_to_cache_dir(path): +def path_to_cache_dir(path, use_abspath=True): """ Convert an absolute path to a directory name for use in a cache. @@ -795,7 +799,7 @@ #. Any occurrence of ``os.sep`` is replaced with ``'--'``. #. ``'.cache'`` is appended. """ - d, p = os.path.splitdrive(os.path.abspath(path)) + d, p = os.path.splitdrive(os.path.abspath(path) if use_abspath else path) if d: d = d.replace(':', '---') p = p.replace(os.sep, '--') @@ -828,6 +832,7 @@ def get_process_umask(): os.umask(result) return result + def is_string_sequence(seq): result = True i = None @@ -838,6 +843,7 @@ assert i is not None return result + PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-' '([a-z0-9_.+-]+)', re.I) PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)') @@ -867,10 +873,12 @@ def split_filename(filename, project_name=None): result = m.group(1), m.group(3), pyver return result + # Allow spaces in name because of legacy dists like "Twisted Core" NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*' r'\(\s*(?P<ver>[^\s)]+)\)$') + def parse_name_and_version(p): """ A utility method used to get name and version from a string. @@ -886,6 +894,7 @@ d = m.groupdict() return d['name'].strip().lower(), d['ver'] + def get_extras(requested, available): result = set() requested = set(requested or []) @@ -907,10 +916,13 @@ logger.warning('undeclared extra: %s' % r) result.add(r) return result + + # # Extended metadata functionality # + def _get_external_data(url): result = {} try: @@ -924,21 +936,24 @@ logger.debug('Unexpected response for JSON request: %s', ct) else: reader = codecs.getreader('utf-8')(resp) - #data = reader.read().decode('utf-8') - #result = json.loads(data) + # data = reader.read().decode('utf-8') + # result = json.loads(data) result = json.load(reader) except Exception as e: logger.exception('Failed to get external data for %s: %s', url, e) return result + _external_data_base_url = 'https://www.red-dove.com/pypi/projects/' + def get_project_data(name): url = '%s/%s/project.json' % (name[0].upper(), name) url = urljoin(_external_data_base_url, url) result = _get_external_data(url) return result + def get_package_data(name, version): url = '%s/%s/package-%s.json' % (name[0].upper(), name, version) url = urljoin(_external_data_base_url, url) @@ -966,11 +981,11 @@ def __init__(self, base): logger.warning('Directory \'%s\' is not private', base) self.base = os.path.abspath(os.path.normpath(base)) - def prefix_to_dir(self, prefix): + def prefix_to_dir(self, prefix, use_abspath=True): """ Converts a resource prefix to a directory name in the cache. """ - return path_to_cache_dir(prefix) + return path_to_cache_dir(prefix, use_abspath=use_abspath) def clear(self): """ @@ -993,6 +1008,7 @@ class EventMixin(object): """ A very simple publish/subscribe system. 
""" + def __init__(self): self._subscribers = {} @@ -1054,18 +1070,19 @@ def publish(self, event, *args, **kwargs): logger.exception('Exception during event publication') value = None result.append(value) - logger.debug('publish %s: args = %s, kwargs = %s, result = %s', - event, args, kwargs, result) + logger.debug('publish %s: args = %s, kwargs = %s, result = %s', event, args, kwargs, result) return result + # # Simple sequencing # class Sequencer(object): + def __init__(self): self._preds = {} self._succs = {} - self._nodes = set() # nodes with no preds/succs + self._nodes = set() # nodes with no preds/succs def add_node(self, node): self._nodes.add(node) @@ -1105,8 +1122,7 @@ def remove(self, pred, succ): raise ValueError('%r not a successor of %r' % (succ, pred)) def is_step(self, step): - return (step in self._preds or step in self._succs or - step in self._nodes) + return (step in self._preds or step in self._succs or step in self._nodes) def get_steps(self, final): if not self.is_step(final): @@ -1135,7 +1151,7 @@ def get_steps(self, final): @property def strong_connections(self): - #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm + # http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm index_counter = [0] stack = [] lowlinks = {} @@ -1160,11 +1176,11 @@ def strongconnect(node): if successor not in lowlinks: # Successor has not yet been visited strongconnect(successor) - lowlinks[node] = min(lowlinks[node],lowlinks[successor]) + lowlinks[node] = min(lowlinks[node], lowlinks[successor]) elif successor in stack: # the successor is in the stack and hence in the current # strongly connected component (SCC) - lowlinks[node] = min(lowlinks[node],index[successor]) + lowlinks[node] = min(lowlinks[node], index[successor]) # If `node` is a root node, pop the stack and generate an SCC if lowlinks[node] == index[node]: @@ -1173,7 +1189,8 @@ def strongconnect(node): while True: successor = stack.pop() connected_component.append(successor) - if successor == node: break + if successor == node: + break component = tuple(connected_component) # storing the result result.append(component) @@ -1196,12 +1213,13 @@ def dot(self): result.append('}') return '\n'.join(result) + # # Unarchiving functionality for zip, tar, tgz, tbz, whl # -ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', - '.tgz', '.tbz', '.whl') +ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz', '.whl') + def unarchive(archive_filename, dest_dir, format=None, check=True): @@ -1250,6 +1268,20 @@ def check_path(path): for tarinfo in archive.getmembers(): if not isinstance(tarinfo.name, text_type): tarinfo.name = tarinfo.name.decode('utf-8') + + # Limit extraction of dangerous items, if this Python + # allows it easily. If not, just trust the input. 
+ # See: https://docs.python.org/3/library/tarfile.html#extraction-filters + def extraction_filter(member, path): + """Run tarfile.tar_filter, but raise the expected ValueError""" + # This is only called if the current Python has tarfile filters + try: + return tarfile.tar_filter(member, path) + except tarfile.FilterError as exc: + raise ValueError(str(exc)) + + archive.extraction_filter = extraction_filter + archive.extractall(dest_dir) finally: @@ -1270,11 +1302,12 @@ def zip_dir(directory): zf.write(full, dest) return result + # # Simple progress bar # -UNITS = ('', 'K', 'M', 'G','T','P') +UNITS = ('', 'K', 'M', 'G', 'T', 'P') class Progress(object): @@ -1329,8 +1362,8 @@ def percentage(self): def format_duration(self, duration): if (duration <= 0) and self.max is None or self.cur == self.min: result = '??:??:??' - #elif duration < 1: - # result = '--:--:--' + # elif duration < 1: + # result = '--:--:--' else: result = time.strftime('%H:%M:%S', time.gmtime(duration)) return result @@ -1340,7 +1373,7 @@ def ETA(self): if self.done: prefix = 'Done' t = self.elapsed - #import pdb; pdb.set_trace() + # import pdb; pdb.set_trace() else: prefix = 'ETA ' if self.max is None: @@ -1348,7 +1381,7 @@ def ETA(self): elif self.elapsed == 0 or (self.cur == self.min): t = 0 else: - #import pdb; pdb.set_trace() + # import pdb; pdb.set_trace() t = float(self.max - self.min) t /= self.cur - self.min t = (t - 1) * self.elapsed @@ -1366,6 +1399,7 @@ def speed(self): result /= 1000.0 return '%d %sB/s' % (result, unit) + # # Glob functionality # @@ -1413,18 +1447,17 @@ def _iglob(path_glob): for fn in _iglob(os.path.join(path, radical)): yield fn -if ssl: - from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname, - CertificateError) +if ssl: + from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname, CertificateError) -# -# HTTPSConnection which verifies certificates/matches domains -# + # + # HTTPSConnection which verifies certificates/matches domains + # class HTTPSConnection(httplib.HTTPSConnection): - ca_certs = None # set this to the path to the certs file (.pem) - check_domain = True # only used if ca_certs is not None + ca_certs = None # set this to the path to the certs file (.pem) + check_domain = True # only used if ca_certs is not None # noinspection PyPropertyAccess def connect(self): @@ -1433,28 +1466,19 @@ def connect(self): self.sock = sock self._tunnel() - if not hasattr(ssl, 'SSLContext'): - # For 2.x - if self.ca_certs: - cert_reqs = ssl.CERT_REQUIRED - else: - cert_reqs = ssl.CERT_NONE - self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, - cert_reqs=cert_reqs, - ssl_version=ssl.PROTOCOL_SSLv23, - ca_certs=self.ca_certs) - else: # pragma: no cover - context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + context = ssl.SSLContext(ssl.PROTOCOL_SSLv23) + if hasattr(ssl, 'OP_NO_SSLv2'): context.options |= ssl.OP_NO_SSLv2 - if self.cert_file: - context.load_cert_chain(self.cert_file, self.key_file) - kwargs = {} - if self.ca_certs: - context.verify_mode = ssl.CERT_REQUIRED - context.load_verify_locations(cafile=self.ca_certs) - if getattr(ssl, 'HAS_SNI', False): - kwargs['server_hostname'] = self.host - self.sock = context.wrap_socket(sock, **kwargs) + if getattr(self, 'cert_file', None): + context.load_cert_chain(self.cert_file, self.key_file) + kwargs = {} + if self.ca_certs: + context.verify_mode = ssl.CERT_REQUIRED + context.load_verify_locations(cafile=self.ca_certs) + if getattr(ssl, 'HAS_SNI', False): + kwargs['server_hostname'] = self.host + + self.sock = 
context.wrap_socket(sock, **kwargs) if self.ca_certs and self.check_domain: try: match_hostname(self.sock.getpeercert(), self.host) @@ -1465,6 +1489,7 @@ def connect(self): raise class HTTPSHandler(BaseHTTPSHandler): + def __init__(self, ca_certs, check_domain=True): BaseHTTPSHandler.__init__(self) self.ca_certs = ca_certs @@ -1506,50 +1531,33 @@ def https_open(self, req): # handler for HTTP itself. # class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler): + def http_open(self, req): raise URLError('Unexpected HTTP request on what should be a secure ' 'connection: %s' % req) + # # XML-RPC with timeouts # - -_ver_info = sys.version_info[:2] - -if _ver_info == (2, 6): - class HTTP(httplib.HTTP): - def __init__(self, host='', port=None, **kwargs): - if port == 0: # 0 means use port 0, not the default port - port = None - self._setup(self._connection_class(host, port, **kwargs)) - - - if ssl: - class HTTPS(httplib.HTTPS): - def __init__(self, host='', port=None, **kwargs): - if port == 0: # 0 means use port 0, not the default port - port = None - self._setup(self._connection_class(host, port, **kwargs)) - - class Transport(xmlrpclib.Transport): + def __init__(self, timeout, use_datetime=0): self.timeout = timeout xmlrpclib.Transport.__init__(self, use_datetime) def make_connection(self, host): h, eh, x509 = self.get_host_info(host) - if _ver_info == (2, 6): - result = HTTP(h, timeout=self.timeout) - else: - if not self._connection or host != self._connection[0]: - self._extra_headers = eh - self._connection = host, httplib.HTTPConnection(h) - result = self._connection[1] - return result + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPConnection(h) + return self._connection[1] + if ssl: + class SafeTransport(xmlrpclib.SafeTransport): + def __init__(self, timeout, use_datetime=0): self.timeout = timeout xmlrpclib.SafeTransport.__init__(self, use_datetime) @@ -1559,24 +1567,21 @@ def make_connection(self, host): if not kwargs: kwargs = {} kwargs['timeout'] = self.timeout - if _ver_info == (2, 6): - result = HTTPS(host, None, **kwargs) - else: - if not self._connection or host != self._connection[0]: - self._extra_headers = eh - self._connection = host, httplib.HTTPSConnection(h, None, - **kwargs) - result = self._connection[1] - return result + if not self._connection or host != self._connection[0]: + self._extra_headers = eh + self._connection = host, httplib.HTTPSConnection(h, None, **kwargs) + return self._connection[1] class ServerProxy(xmlrpclib.ServerProxy): + def __init__(self, uri, **kwargs): self.timeout = timeout = kwargs.pop('timeout', None) # The above classes only come into play if a timeout # is specified if timeout is not None: - scheme, _ = splittype(uri) + # scheme = splittype(uri) # deprecated as of Python 3.8 + scheme = urlparse(uri)[0] use_datetime = kwargs.get('use_datetime', 0) if scheme == 'https': tcls = SafeTransport @@ -1586,11 +1591,13 @@ def __init__(self, uri, **kwargs): self.transport = t xmlrpclib.ServerProxy.__init__(self, uri, **kwargs) + # # CSV functionality. This is provided because on 2.x, the csv module can't # handle Unicode. However, we need to deal with Unicode in e.g. RECORD files. 
# + def _csv_open(fn, mode, **kwargs): if sys.version_info[0] < 3: mode += 'b' @@ -1604,9 +1611,9 @@ def _csv_open(fn, mode, **kwargs): class CSVBase(object): defaults = { - 'delimiter': str(','), # The strs are used because we need native - 'quotechar': str('"'), # str in the csv API (2.x won't take - 'lineterminator': str('\n') # Unicode) + 'delimiter': str(','), # The strs are used because we need native + 'quotechar': str('"'), # str in the csv API (2.x won't take + 'lineterminator': str('\n') # Unicode) } def __enter__(self): @@ -1617,6 +1624,7 @@ def __exit__(self, *exc_info): class CSVReader(CSVBase): + def __init__(self, **kwargs): if 'stream' in kwargs: stream = kwargs['stream'] @@ -1641,7 +1649,9 @@ def next(self): __next__ = next + class CSVWriter(CSVBase): + def __init__(self, fn, **kwargs): self.stream = _csv_open(fn, 'w') self.writer = csv.writer(self.stream, **self.defaults) @@ -1656,10 +1666,12 @@ def writerow(self, row): row = r self.writer.writerow(row) + # # Configurator functionality # + class Configurator(BaseConfigurator): value_converters = dict(BaseConfigurator.value_converters) @@ -1670,6 +1682,7 @@ def __init__(self, config, base=None): self.base = base or os.getcwd() def configure_custom(self, config): + def convert(o): if isinstance(o, (list, tuple)): result = type(o)([convert(i) for i in o]) @@ -1719,6 +1732,7 @@ class SubprocessMixin(object): """ Mixin for running subprocesses and capturing their output """ + def __init__(self, verbose=False, progress=None): self.verbose = verbose self.progress = progress @@ -1745,8 +1759,7 @@ def reader(self, stream, context): stream.close() def run_command(self, cmd, **kwargs): - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, **kwargs) + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout')) t1.start() t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr')) @@ -1765,3 +1778,207 @@ def normalize_name(name): """Normalize a python package name a la PEP 503""" # https://www.python.org/dev/peps/pep-0503/#normalized-names return re.sub('[-_.]+', '-', name).lower() + + +# def _get_pypirc_command(): +# """ +# Get the distutils command for interacting with PyPI configurations. +# :return: the command. 
+# """ +# from distutils.core import Distribution +# from distutils.config import PyPIRCCommand +# d = Distribution() +# return PyPIRCCommand(d) + + +class PyPIRCFile(object): + + DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/' + DEFAULT_REALM = 'pypi' + + def __init__(self, fn=None, url=None): + if fn is None: + fn = os.path.join(os.path.expanduser('~'), '.pypirc') + self.filename = fn + self.url = url + + def read(self): + result = {} + + if os.path.exists(self.filename): + repository = self.url or self.DEFAULT_REPOSITORY + + config = configparser.RawConfigParser() + config.read(self.filename) + sections = config.sections() + if 'distutils' in sections: + # let's get the list of servers + index_servers = config.get('distutils', 'index-servers') + _servers = [server.strip() for server in index_servers.split('\n') if server.strip() != ''] + if _servers == []: + # nothing set, let's try to get the default pypi + if 'pypi' in sections: + _servers = ['pypi'] + else: + for server in _servers: + result = {'server': server} + result['username'] = config.get(server, 'username') + + # optional params + for key, default in (('repository', self.DEFAULT_REPOSITORY), ('realm', self.DEFAULT_REALM), + ('password', None)): + if config.has_option(server, key): + result[key] = config.get(server, key) + else: + result[key] = default + + # work around people having "repository" for the "pypi" + # section of their config set to the HTTP (rather than + # HTTPS) URL + if (server == 'pypi' and repository in (self.DEFAULT_REPOSITORY, 'pypi')): + result['repository'] = self.DEFAULT_REPOSITORY + elif (result['server'] != repository and result['repository'] != repository): + result = {} + elif 'server-login' in sections: + # old format + server = 'server-login' + if config.has_option(server, 'repository'): + repository = config.get(server, 'repository') + else: + repository = self.DEFAULT_REPOSITORY + result = { + 'username': config.get(server, 'username'), + 'password': config.get(server, 'password'), + 'repository': repository, + 'server': server, + 'realm': self.DEFAULT_REALM + } + return result + + def update(self, username, password): + # import pdb; pdb.set_trace() + config = configparser.RawConfigParser() + fn = self.filename + config.read(fn) + if not config.has_section('pypi'): + config.add_section('pypi') + config.set('pypi', 'username', username) + config.set('pypi', 'password', password) + with open(fn, 'w') as f: + config.write(f) + + +def _load_pypirc(index): + """ + Read the PyPI access configuration as supported by distutils. + """ + return PyPIRCFile(url=index.url).read() + + +def _store_pypirc(index): + PyPIRCFile().update(index.username, index.password) + + +# +# get_platform()/get_host_platform() copied from Python 3.10.a0 source, with some minor +# tweaks +# + + +def get_host_platform(): + """Return a string that identifies the current platform. This is used mainly to + distinguish platform-specific build directories and platform-specific built + distributions. Typically includes the OS name and version and the + architecture (as supplied by 'os.uname()'), although the exact information + included depends on the OS; eg. on Linux, the kernel version isn't + particularly important. + + Examples of returned values: + linux-i586 + linux-alpha (?) 
+ solaris-2.6-sun4u + + Windows will return one of: + win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc) + win32 (all others - specifically, sys.platform is returned) + + For other non-POSIX platforms, currently just returns 'sys.platform'. + + """ + if os.name == 'nt': + if 'amd64' in sys.version.lower(): + return 'win-amd64' + if '(arm)' in sys.version.lower(): + return 'win-arm32' + if '(arm64)' in sys.version.lower(): + return 'win-arm64' + return sys.platform + + # Set for cross builds explicitly + if "_PYTHON_HOST_PLATFORM" in os.environ: + return os.environ["_PYTHON_HOST_PLATFORM"] + + if os.name != 'posix' or not hasattr(os, 'uname'): + # XXX what about the architecture? NT is Intel or Alpha, + # Mac OS is M68k or PPC, etc. + return sys.platform + + # Try to distinguish various flavours of Unix + + (osname, host, release, version, machine) = os.uname() + + # Convert the OS name to lowercase, remove '/' characters, and translate + # spaces (for "Power Macintosh") + osname = osname.lower().replace('/', '') + machine = machine.replace(' ', '_').replace('/', '-') + + if osname[:5] == 'linux': + # At least on Linux/Intel, 'machine' is the processor -- + # i386, etc. + # XXX what about Alpha, SPARC, etc? + return "%s-%s" % (osname, machine) + + elif osname[:5] == 'sunos': + if release[0] >= '5': # SunOS 5 == Solaris 2 + osname = 'solaris' + release = '%d.%s' % (int(release[0]) - 3, release[2:]) + # We can't use 'platform.architecture()[0]' because a + # bootstrap problem. We use a dict to get an error + # if some suspicious happens. + bitness = {2147483647: '32bit', 9223372036854775807: '64bit'} + machine += '.%s' % bitness[sys.maxsize] + # fall through to standard osname-release-machine representation + elif osname[:3] == 'aix': + from _aix_support import aix_platform + return aix_platform() + elif osname[:6] == 'cygwin': + osname = 'cygwin' + rel_re = re.compile(r'[\d.]+', re.ASCII) + m = rel_re.match(release) + if m: + release = m.group() + elif osname[:6] == 'darwin': + import _osx_support + try: + from distutils import sysconfig + except ImportError: + import sysconfig + osname, release, machine = _osx_support.get_platform_osx(sysconfig.get_config_vars(), osname, release, machine) + + return '%s-%s-%s' % (osname, release, machine) + + +_TARGET_TO_PLAT = { + 'x86': 'win32', + 'x64': 'win-amd64', + 'arm': 'win-arm32', +} + + +def get_platform(): + if os.name != 'nt': + return get_host_platform() + cross_compilation_target = os.environ.get('VSCMD_ARG_TGT_ARCH') + if cross_compilation_target not in _TARGET_TO_PLAT: + return get_host_platform() + return _TARGET_TO_PLAT[cross_compilation_target] diff --git a/src/rez/vendor/distlib/version.py b/src/rez/vendor/distlib/version.py index 3eebe18ee..d70a96ef5 100644 --- a/src/rez/vendor/distlib/version.py +++ b/src/rez/vendor/distlib/version.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2012-2017 The Python Software Foundation. +# Copyright (C) 2012-2023 The Python Software Foundation. # See LICENSE.txt and CONTRIBUTORS.txt. # """ @@ -176,9 +176,9 @@ def __str__(self): return self._string -PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?' - r'(\.(post)(\d+))?(\.(dev)(\d+))?' - r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$') +PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|alpha|b|beta|c|rc|pre|preview)(\d+)?)?' + r'(\.(post|r|rev)(\d+)?)?([._-]?(dev)(\d+)?)?' 
+ r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$', re.I) def _pep_440_key(s): @@ -194,7 +194,7 @@ def _pep_440_key(s): if not groups[0]: epoch = 0 else: - epoch = int(groups[0]) + epoch = int(groups[0][:-1]) pre = groups[4:6] post = groups[7:9] dev = groups[10:12] @@ -202,15 +202,24 @@ def _pep_440_key(s): if pre == (None, None): pre = () else: - pre = pre[0], int(pre[1]) + if pre[1] is None: + pre = pre[0], 0 + else: + pre = pre[0], int(pre[1]) if post == (None, None): post = () else: - post = post[0], int(post[1]) + if post[1] is None: + post = post[0], 0 + else: + post = post[0], int(post[1]) if dev == (None, None): dev = () else: - dev = dev[0], int(dev[1]) + if dev[1] is None: + dev = dev[0], 0 + else: + dev = dev[0], int(dev[1]) if local is None: local = () else: @@ -238,7 +247,6 @@ def _pep_440_key(s): if not dev: dev = ('final',) - #print('%s -> %s' % (s, m.groups())) return epoch, nums, pre, post, dev, local @@ -378,6 +386,7 @@ def _match_compatible(self, version, constraint, prefix): pfx = '.'.join([str(i) for i in release_clause]) return _match_prefix(version, pfx) + _REPLACEMENTS = ( (re.compile('[.+-]$'), ''), # remove trailing puncts (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start @@ -388,7 +397,7 @@ def _match_compatible(self, version, constraint, prefix): (re.compile('[.]{2,}'), '.'), # multiple runs of '.' (re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha (re.compile(r'\b(pre-alpha|prealpha)\b'), - 'pre.alpha'), # standardise + 'pre.alpha'), # standardise (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses ) @@ -416,7 +425,7 @@ def _suggest_semantic_version(s): # Now look for numeric prefix, and separate it out from # the rest. - #import pdb; pdb.set_trace() + # import pdb; pdb.set_trace() m = _NUMERIC_PREFIX.match(result) if not m: prefix = '0.0.0' @@ -434,7 +443,7 @@ def _suggest_semantic_version(s): prefix = '.'.join([str(i) for i in prefix]) suffix = suffix.strip() if suffix: - #import pdb; pdb.set_trace() + # import pdb; pdb.set_trace() # massage the suffix. for pat, repl in _SUFFIX_REPLACEMENTS: suffix = pat.sub(repl, suffix) @@ -504,7 +513,7 @@ def _suggest_normalized_version(s): rs = rs[1:] # Clean leading '0's on numbers. - #TODO: unintended side-effect on, e.g., "2003.05.09" + # TODO: unintended side-effect on, e.g., "2003.05.09" # PyPI stats: 77 (~2%) better rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs) @@ -563,6 +572,7 @@ def _suggest_normalized_version(s): # Legacy version processing (distribute-compatible) # + _VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I) _VERSION_REPLACE = { 'pre': 'c', @@ -609,8 +619,7 @@ def parse(self, s): def is_prerelease(self): result = False for x in self._parts: - if (isinstance(x, string_types) and x.startswith('*') and - x < '*final'): + if (isinstance(x, string_types) and x.startswith('*') and x < '*final'): result = True break return result @@ -641,6 +650,7 @@ def _match_compatible(self, version, constraint, prefix): # Semantic versioning # + _SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)' r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?' r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I) @@ -710,6 +720,9 @@ def is_valid_constraint_list(self, s): """ Used for processing some metadata fields """ + # See issue #140. Be tolerant of a single trailing comma. 
+ if s.endswith(','): + s = s[:-1] return self.is_valid_matcher('dummy_name (%s)' % s) def suggest(self, s): @@ -719,6 +732,7 @@ def suggest(self, s): result = self.suggester(s) return result + _SCHEMES = { 'normalized': VersionScheme(_normalized_key, NormalizedMatcher, _suggest_normalized_version), diff --git a/src/rez/vendor/distlib/w32.exe b/src/rez/vendor/distlib/w32.exe index 4df77001a..4ee2d3a31 100644 Binary files a/src/rez/vendor/distlib/w32.exe and b/src/rez/vendor/distlib/w32.exe differ diff --git a/src/rez/vendor/distlib/w64-arm.exe b/src/rez/vendor/distlib/w64-arm.exe new file mode 100644 index 000000000..951d5817c Binary files /dev/null and b/src/rez/vendor/distlib/w64-arm.exe differ diff --git a/src/rez/vendor/distlib/w64.exe b/src/rez/vendor/distlib/w64.exe index 63ce483d1..5763076d2 100644 Binary files a/src/rez/vendor/distlib/w64.exe and b/src/rez/vendor/distlib/w64.exe differ diff --git a/src/rez/vendor/distlib/wheel.py b/src/rez/vendor/distlib/wheel.py index 0c8efad9a..62ab10fb3 100644 --- a/src/rez/vendor/distlib/wheel.py +++ b/src/rez/vendor/distlib/wheel.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright (C) 2013-2017 Vinay Sajip. +# Copyright (C) 2013-2023 Vinay Sajip. # Licensed to the Python Software Foundation under a contributor agreement. # See LICENSE.txt and CONTRIBUTORS.txt. # @@ -9,10 +9,8 @@ import base64 import codecs import datetime -import distutils.util from email import message_from_file import hashlib -import imp import json import logging import os @@ -26,14 +24,14 @@ from . import __version__, DistlibException from .compat import sysconfig, ZipFile, fsdecode, text_type, filter from .database import InstalledDistribution -from .metadata import Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME -from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache, - cached_property, get_cache_base, read_exports, tempdir) +from .metadata import Metadata, WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME +from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache, cached_property, get_cache_base, + read_exports, tempdir, get_platform) from .version import NormalizedVersion, UnsupportedVersionError logger = logging.getLogger(__name__) -cache = None # created when needed +cache = None # created when needed if hasattr(sys, 'pypy_version_info'): # pragma: no cover IMP_PREFIX = 'pp' @@ -45,30 +43,41 @@ IMP_PREFIX = 'cp' VER_SUFFIX = sysconfig.get_config_var('py_version_nodot') -if not VER_SUFFIX: # pragma: no cover +if not VER_SUFFIX: # pragma: no cover VER_SUFFIX = '%s%s' % sys.version_info[:2] PYVER = 'py' + VER_SUFFIX IMPVER = IMP_PREFIX + VER_SUFFIX -ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_') +ARCH = get_platform().replace('-', '_').replace('.', '_') ABI = sysconfig.get_config_var('SOABI') if ABI and ABI.startswith('cpython-'): - ABI = ABI.replace('cpython-', 'cp') + ABI = ABI.replace('cpython-', 'cp').split('-')[0] else: + def _derive_abi(): parts = ['cp', VER_SUFFIX] if sysconfig.get_config_var('Py_DEBUG'): parts.append('d') - if sysconfig.get_config_var('WITH_PYMALLOC'): - parts.append('m') - if sysconfig.get_config_var('Py_UNICODE_SIZE') == 4: - parts.append('u') + if IMP_PREFIX == 'cp': + vi = sys.version_info[:2] + if vi < (3, 8): + wpm = sysconfig.get_config_var('WITH_PYMALLOC') + if wpm is None: + wpm = True + if wpm: + parts.append('m') + if vi < (3, 3): + us = sysconfig.get_config_var('Py_UNICODE_SIZE') + if us == 4 or (us is None and sys.maxunicode == 0x10FFFF): + parts.append('u') 
return ''.join(parts) + ABI = _derive_abi() del _derive_abi -FILENAME_RE = re.compile(r''' +FILENAME_RE = re.compile( + r''' (?P<nm>[^-]+) -(?P<vn>\d+[^-]*) (-(?P<bn>\d+[^-]*))? @@ -94,8 +103,35 @@ def _derive_abi(): else: to_posix = lambda o: o.replace(os.sep, '/') +if sys.version_info[0] < 3: + import imp +else: + imp = None + import importlib.machinery + import importlib.util + + +def _get_suffixes(): + if imp: + return [s[0] for s in imp.get_suffixes()] + else: + return importlib.machinery.EXTENSION_SUFFIXES + + +def _load_dynamic(name, path): + # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly + if imp: + return imp.load_dynamic(name, path) + else: + spec = importlib.util.spec_from_file_location(name, path) + module = importlib.util.module_from_spec(spec) + sys.modules[name] = module + spec.loader.exec_module(module) + return module + class Mounter(object): + def __init__(self): self.impure_wheels = {} self.libs = {} @@ -123,13 +159,14 @@ def load_module(self, fullname): else: if fullname not in self.libs: raise ImportError('unable to find extension for %s' % fullname) - result = imp.load_dynamic(fullname, self.libs[fullname]) + result = _load_dynamic(fullname, self.libs[fullname]) result.__loader__ = self parts = fullname.rsplit('.', 1) if len(parts) > 1: result.__package__ = parts[0] return result + _hook = Mounter() @@ -196,8 +233,7 @@ def filename(self): arch = '.'.join(self.arch) # replace - with _ as a local version separator version = self.version.replace('-', '_') - return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver, - pyver, abi, arch) + return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver, pyver, abi, arch) @property def exists(self): @@ -218,13 +254,15 @@ def metadata(self): info_dir = '%s.dist-info' % name_ver wrapper = codecs.getreader('utf-8') with ZipFile(pathname, 'r') as zf: - wheel_metadata = self.get_wheel_metadata(zf) - wv = wheel_metadata['Wheel-Version'].split('.', 1) - file_version = tuple([int(i) for i in wv]) - if file_version < (1, 1): - fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME, 'METADATA'] - else: - fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME] + self.get_wheel_metadata(zf) + # wv = wheel_metadata['Wheel-Version'].split('.', 1) + # file_version = tuple([int(i) for i in wv]) + # if file_version < (1, 1): + # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME, + # LEGACY_METADATA_FILENAME] + # else: + # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME] + fns = [WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME] result = None for fn in fns: try: @@ -298,11 +336,9 @@ def get_hash(self, data, hash_kind=None): result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii') return hash_kind, result - def write_record(self, records, record_path, base): - records = list(records) # make a copy for sorting - p = to_posix(os.path.relpath(record_path, base)) - records.append((p, '', '')) - records.sort() + def write_record(self, records, record_path, archive_record_path): + records = list(records) # make a copy, as mutated + records.append((archive_record_path, '', '')) with CSVWriter(record_path) as writer: for row in records: writer.writerow(row) @@ -310,7 +346,7 @@ def write_record(self, records, record_path, base): def write_records(self, info, libdir, archive_paths): records = [] distinfo, info_dir = info - hasher = getattr(hashlib, self.hash_kind) + # hasher = getattr(hashlib, self.hash_kind) for ap, p in archive_paths: with open(p, 'rb') as f: data = f.read() @@ -319,8 +355,8 @@ def write_records(self, 
info, libdir, archive_paths): records.append((ap, digest, size)) p = os.path.join(distinfo, 'RECORD') - self.write_record(records, p, libdir) ap = to_posix(os.path.join(info_dir, 'RECORD')) + self.write_record(records, p, ap) archive_paths.append((ap, p)) def build_zip(self, pathname, archive_paths): @@ -425,6 +461,19 @@ def build(self, paths, tags=None, wheel_version=None): ap = to_posix(os.path.join(info_dir, 'WHEEL')) archive_paths.append((ap, p)) + # sort the entries by archive path. Not needed by any spec, but it + # keeps the archive listing and RECORD tidier than they would otherwise + # be. Use the number of path segments to keep directory entries together, + # and keep the dist-info stuff at the end. + def sorter(t): + ap = t[0] + n = ap.count('/') + if '.dist-info' in ap: + n += 10000 + return (n, ap) + + archive_paths = sorted(archive_paths, key=sorter) + # Now, at last, RECORD. # Paths in here are archive paths - nothing else makes sense. self.write_records((distinfo, info_dir), libdir, archive_paths) @@ -460,7 +509,7 @@ def install(self, paths, maker, **kwargs): installed, and the headers, scripts, data and dist-info metadata are not written. If kwarg ``bytecode_hashed_invalidation`` is True, written bytecode will try to use file-hash based invalidation (PEP-552) on - supported interpreter versions (CPython 2.7+). + supported interpreter versions (CPython 3.7+). The return value is a :class:`InstalledDistribution` instance unless ``options.lib_only`` is True, in which case the return value is ``None``. @@ -476,7 +525,7 @@ def install(self, paths, maker, **kwargs): data_dir = '%s.data' % name_ver info_dir = '%s.dist-info' % name_ver - metadata_name = posixpath.join(info_dir, METADATA_FILENAME) + metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME) wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') record_name = posixpath.join(info_dir, 'RECORD') @@ -510,11 +559,11 @@ def install(self, paths, maker, **kwargs): # make a new instance rather than a copy of maker's, # as we mutate it fileop = FileOperator(dry_run=dry_run) - fileop.record = True # so we can rollback if needed + fileop.record = True # so we can rollback if needed - bc = not sys.dont_write_bytecode # Double negatives. Lovely! + bc = not sys.dont_write_bytecode # Double negatives. Lovely! - outfiles = [] # for RECORD writing + outfiles = [] # for RECORD writing # for script copying/shebang processing workdir = tempfile.mkdtemp() @@ -548,8 +597,7 @@ def install(self, paths, maker, **kwargs): if lib_only and u_arcname.startswith((info_pfx, data_pfx)): logger.debug('lib_only: skipping %s', u_arcname) continue - is_script = (u_arcname.startswith(script_pfx) - and not u_arcname.endswith('.exe')) + is_script = (u_arcname.startswith(script_pfx) and not u_arcname.endswith('.exe')) if u_arcname.startswith(data_pfx): _, where, rp = u_arcname.split('/', 2) @@ -562,6 +610,13 @@ def install(self, paths, maker, **kwargs): if not is_script: with zf.open(arcname) as bf: fileop.copy_stream(bf, outfile) + # Issue #147: permission bits aren't preserved. Using + # zf.extract(zinfo, libdir) should have worked, but didn't, + # see https://www.thetopsites.net/article/53834422.shtml + # So ... 
manually preserve permission bits as given in zinfo + if os.name == 'posix': + # just set the normal permission bits + os.chmod(outfile, (zinfo.external_attr >> 16) & 0x1FF) outfiles.append(outfile) # Double check the digest of the written file if not dry_run and row[1]: @@ -574,14 +629,12 @@ def install(self, paths, maker, **kwargs): '%s' % outfile) if bc and outfile.endswith('.py'): try: - pyc = fileop.byte_compile(outfile, - hashed_invalidation=bc_hashed_invalidation) + pyc = fileop.byte_compile(outfile, hashed_invalidation=bc_hashed_invalidation) outfiles.append(pyc) except Exception: # Don't give up if byte-compilation fails, # but log it and perhaps warn the user - logger.warning('Byte-compilation failed', - exc_info=True) + logger.warning('Byte-compilation failed', exc_info=True) else: fn = os.path.basename(convert_path(arcname)) workname = os.path.join(workdir, fn) @@ -619,7 +672,7 @@ def install(self, paths, maker, **kwargs): for v in epdata[k].values(): s = '%s:%s' % (v.prefix, v.suffix) if v.flags: - s += ' %s' % v.flags + s += ' [%s]' % ','.join(v.flags) d[v.name] = s except Exception: logger.warning('Unable to read legacy script ' @@ -650,7 +703,7 @@ def install(self, paths, maker, **kwargs): fileop.set_executable_mode(filenames) if gui_scripts: - options = {'gui': True } + options = {'gui': True} for k, v in gui_scripts.items(): script = '%s = %s' % (k, v) filenames = maker.make(script, options) @@ -660,7 +713,7 @@ def install(self, paths, maker, **kwargs): dist = InstalledDistribution(p) # Write SHARED - paths = dict(paths) # don't change passed in dict + paths = dict(paths) # don't change passed in dict del paths['purelib'] del paths['platlib'] paths['lib'] = libdir @@ -669,8 +722,7 @@ def install(self, paths, maker, **kwargs): outfiles.append(p) # Write RECORD - dist.write_installed_files(outfiles, paths['prefix'], - dry_run) + dist.write_installed_files(outfiles, paths['prefix'], dry_run) return dist except Exception: # pragma: no cover logger.exception('installation failed.') @@ -683,8 +735,7 @@ def _get_dylib_cache(self): global cache if cache is None: # Use native string to avoid issues on 2.x: see Python #20140. - base = os.path.join(get_cache_base(), str('dylib-cache'), - sys.version[:3]) + base = os.path.join(get_cache_base(), str('dylib-cache'), '%s.%s' % sys.version_info[:2]) cache = Cache(base) return cache @@ -701,7 +752,7 @@ def _get_extensions(self): wf = wrapper(bf) extensions = json.load(wf) cache = self._get_dylib_cache() - prefix = cache.prefix_to_dir(pathname) + prefix = cache.prefix_to_dir(self.filename, use_abspath=False) cache_base = os.path.join(cache.base, prefix) if not os.path.isdir(cache_base): os.makedirs(cache_base) @@ -732,7 +783,7 @@ def is_mountable(self): """ Determine if a wheel is asserted as mountable by its metadata. 
""" - return True # for now - metadata details TBD + return True # for now - metadata details TBD def mount(self, append=False): pathname = os.path.abspath(os.path.join(self.dirname, self.filename)) @@ -770,10 +821,10 @@ def unmount(self): def verify(self): pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) - data_dir = '%s.data' % name_ver + # data_dir = '%s.data' % name_ver info_dir = '%s.dist-info' % name_ver - metadata_name = posixpath.join(info_dir, METADATA_FILENAME) + # metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME) wheel_metadata_name = posixpath.join(info_dir, 'WHEEL') record_name = posixpath.join(info_dir, 'RECORD') @@ -782,9 +833,9 @@ def verify(self): with ZipFile(pathname, 'r') as zf: with zf.open(wheel_metadata_name) as bwf: wf = wrapper(bwf) - message = message_from_file(wf) - wv = message['Wheel-Version'].split('.', 1) - file_version = tuple([int(i) for i in wv]) + message_from_file(wf) + # wv = message['Wheel-Version'].split('.', 1) + # file_version = tuple([int(i) for i in wv]) # TODO version verification records = {} @@ -842,7 +893,7 @@ def update(self, modifier, dest_dir=None, **kwargs): def get_version(path_map, info_dir): version = path = None - key = '%s/%s' % (info_dir, METADATA_FILENAME) + key = '%s/%s' % (info_dir, LEGACY_METADATA_FILENAME) if key not in path_map: key = '%s/PKG-INFO' % info_dir if key in path_map: @@ -853,25 +904,23 @@ def get_version(path_map, info_dir): def update_version(version, path): updated = None try: - v = NormalizedVersion(version) + NormalizedVersion(version) i = version.find('-') if i < 0: updated = '%s+1' % version else: parts = [int(s) for s in version[i + 1:].split('.')] parts[-1] += 1 - updated = '%s+%s' % (version[:i], - '.'.join(str(i) for i in parts)) + updated = '%s+%s' % (version[:i], '.'.join(str(i) for i in parts)) except UnsupportedVersionError: logger.debug('Cannot update non-compliant (PEP-440) ' 'version %r', version) if updated: md = Metadata(path=path) md.version = updated - legacy = not path.endswith(METADATA_FILENAME) + legacy = path.endswith(LEGACY_METADATA_FILENAME) md.write(path=path, legacy=legacy) - logger.debug('Version updated from %r to %r', version, - updated) + logger.debug('Version updated from %r to %r', version, updated) pathname = os.path.join(self.dirname, self.filename) name_ver = '%s-%s' % (self.name, self.version) @@ -907,9 +956,7 @@ def update_version(version, path): update_version(current_version, path) # Decide where the new wheel goes. if dest_dir is None: - fd, newpath = tempfile.mkstemp(suffix='.whl', - prefix='wheel-update-', - dir=workdir) + fd, newpath = tempfile.mkstemp(suffix='.whl', prefix='wheel-update-', dir=workdir) os.close(fd) else: if not os.path.isdir(dest_dir): @@ -924,17 +971,38 @@ def update_version(version, path): shutil.copyfile(newpath, pathname) return modified + +def _get_glibc_version(): + import platform + ver = platform.libc_ver() + result = [] + if ver[0] == 'glibc': + for s in ver[1].split('.'): + result.append(int(s) if s.isdigit() else 0) + result = tuple(result) + return result + + def compatible_tags(): """ Return (pyver, abi, arch) tuples compatible with this Python. 
""" - versions = [VER_SUFFIX] - major = VER_SUFFIX[0] - for minor in range(sys.version_info[1] - 1, - 1, -1): - versions.append(''.join([major, str(minor)])) + class _Version: + def __init__(self, major, minor): + self.major = major + self.major_minor = (major, minor) + self.string = ''.join((str(major), str(minor))) + + def __str__(self): + return self.string + + versions = [ + _Version(sys.version_info.major, minor_version) + for minor_version in range(sys.version_info.minor, -1, -1) + ] abis = [] - for suffix, _, _ in imp.get_suffixes(): + for suffix in _get_suffixes(): if suffix.startswith('.abi'): abis.append(suffix.split('.', 2)[1]) abis.sort() @@ -963,26 +1031,54 @@ def compatible_tags(): while minor >= 0: for match in matches: s = '%s_%s_%s_%s' % (name, major, minor, match) - if s != ARCH: # already there + if s != ARCH: # already there arches.append(s) minor -= 1 # Most specific - our Python version, ABI and arch - for abi in abis: - for arch in arches: - result.append((''.join((IMP_PREFIX, versions[0])), abi, arch)) + for i, version_object in enumerate(versions): + version = str(version_object) + add_abis = [] + + if i == 0: + add_abis = abis + + if IMP_PREFIX == 'cp' and version_object.major_minor >= (3, 2): + limited_api_abi = 'abi' + str(version_object.major) + if limited_api_abi not in add_abis: + add_abis.append(limited_api_abi) + + for abi in add_abis: + for arch in arches: + result.append((''.join((IMP_PREFIX, version)), abi, arch)) + # manylinux + if abi != 'none' and sys.platform.startswith('linux'): + arch = arch.replace('linux_', '') + parts = _get_glibc_version() + if len(parts) == 2: + if parts >= (2, 5): + result.append((''.join((IMP_PREFIX, version)), abi, 'manylinux1_%s' % arch)) + if parts >= (2, 12): + result.append((''.join((IMP_PREFIX, version)), abi, 'manylinux2010_%s' % arch)) + if parts >= (2, 17): + result.append((''.join((IMP_PREFIX, version)), abi, 'manylinux2014_%s' % arch)) + result.append((''.join( + (IMP_PREFIX, version)), abi, 'manylinux_%s_%s_%s' % (parts[0], parts[1], arch))) # where no ABI / arch dependency, but IMP_PREFIX dependency - for i, version in enumerate(versions): + for i, version_object in enumerate(versions): + version = str(version_object) result.append((''.join((IMP_PREFIX, version)), 'none', 'any')) if i == 0: result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any')) # no IMP_PREFIX, ABI or arch dependency - for i, version in enumerate(versions): + for i, version_object in enumerate(versions): + version = str(version_object) result.append((''.join(('py', version)), 'none', 'any')) if i == 0: result.append((''.join(('py', version[0])), 'none', 'any')) + return set(result) @@ -993,7 +1089,7 @@ def compatible_tags(): def is_compatible(wheel, tags=None): if not isinstance(wheel, Wheel): - wheel = Wheel(wheel) # assume it's a filename + wheel = Wheel(wheel) # assume it's a filename result = False if tags is None: tags = COMPATIBLE_TAGS diff --git a/src/rez/vendor/distro/__init__.py b/src/rez/vendor/distro/__init__.py index e69de29bb..7686fe85a 100644 --- a/src/rez/vendor/distro/__init__.py +++ b/src/rez/vendor/distro/__init__.py @@ -0,0 +1,54 @@ +from .distro import ( + NORMALIZED_DISTRO_ID, + NORMALIZED_LSB_ID, + NORMALIZED_OS_ID, + LinuxDistribution, + __version__, + build_number, + codename, + distro_release_attr, + distro_release_info, + id, + info, + like, + linux_distribution, + lsb_release_attr, + lsb_release_info, + major_version, + minor_version, + name, + os_release_attr, + os_release_info, + uname_attr, + uname_info, 
+ version, + version_parts, +) + +__all__ = [ + "NORMALIZED_DISTRO_ID", + "NORMALIZED_LSB_ID", + "NORMALIZED_OS_ID", + "LinuxDistribution", + "build_number", + "codename", + "distro_release_attr", + "distro_release_info", + "id", + "info", + "like", + "linux_distribution", + "lsb_release_attr", + "lsb_release_info", + "major_version", + "minor_version", + "name", + "os_release_attr", + "os_release_info", + "uname_attr", + "uname_info", + "version", + "version_parts", +] + +__version__ = __version__ diff --git a/src/rez/vendor/distro/__main__.py b/src/rez/vendor/distro/__main__.py new file mode 100644 index 000000000..0c01d5b08 --- /dev/null +++ b/src/rez/vendor/distro/__main__.py @@ -0,0 +1,4 @@ +from .distro import main + +if __name__ == "__main__": + main() diff --git a/src/rez/vendor/distro/distro.py b/src/rez/vendor/distro/distro.py index 0611b62a3..78ccdfa40 100644 --- a/src/rez/vendor/distro/distro.py +++ b/src/rez/vendor/distro/distro.py @@ -1,4 +1,5 @@ -# Copyright 2015,2016,2017 Nir Cohen +#!/usr/bin/env python +# Copyright 2015-2021 Nir Cohen # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -20,26 +21,60 @@ It is the recommended replacement for Python's original :py:func:`platform.linux_distribution` function, but it provides much more functionality. An alternative implementation became necessary because Python -3.5 deprecated this function, and Python 3.8 will remove it altogether. -Its predecessor function :py:func:`platform.dist` was already -deprecated since Python 2.6 and will also be removed in Python 3.8. -Still, there are many cases in which access to OS distribution information -is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for -more information. +3.5 deprecated this function, and Python 3.8 removed it altogether. Its +predecessor function :py:func:`platform.dist` was already deprecated since +Python 2.6 and removed in Python 3.8. Still, there are many cases in which +access to OS distribution information is needed. See `Python issue 1322 +<https://bugs.python.org/issue1322>`_ for more information. """ +import argparse +import json +import logging import os import re -import sys -import json import shlex -import logging -import argparse import subprocess +import sys +import warnings +from typing import ( + Any, + Callable, + Dict, + Iterable, + Optional, + Sequence, + TextIO, + Tuple, + Type, +) + +try: + from typing import TypedDict +except ImportError: + # Python 3.7 + TypedDict = dict + +__version__ = "1.9.0" -_UNIXCONFDIR = os.environ.get('UNIXCONFDIR', '/etc') -_OS_RELEASE_BASENAME = 'os-release' +class VersionDict(TypedDict): + major: str + minor: str + build_number: str + + +class InfoDict(TypedDict): + id: str + version: str + version_parts: VersionDict + like: str + codename: str + + +_UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc") +_UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib") +_OS_RELEASE_BASENAME = "os-release" #: Translation table for normalizing the "ID" attribute defined in os-release #: files, for use by the :func:`distro.id` method. #: #: * Value: Normalized value. NORMALIZED_OS_ID = { - 'ol': 'oracle', # Oracle Linux + "ol": "oracle", # Oracle Linux + "opensuse-leap": "opensuse", # Newer versions of OpenSuSE report as opensuse-leap } #: Translation table for normalizing the "Distributor ID" attribute returned by @@ -60,11 +96,11 @@ #: #: * Value: Normalized value. 
NORMALIZED_LSB_ID = { - 'enterpriseenterpriseas': 'oracle', # Oracle Enterprise Linux 4 - 'enterpriseenterpriseserver': 'oracle', # Oracle Linux 5 - 'redhatenterpriseworkstation': 'rhel', # RHEL 6, 7 Workstation - 'redhatenterpriseserver': 'rhel', # RHEL 6, 7 Server - 'redhatenterprisecomputenode': 'rhel', # RHEL 6 ComputeNode + "enterpriseenterpriseas": "oracle", # Oracle Enterprise Linux 4 + "enterpriseenterpriseserver": "oracle", # Oracle Linux 5 + "redhatenterpriseworkstation": "rhel", # RHEL 6, 7 Workstation + "redhatenterpriseserver": "rhel", # RHEL 6, 7 Server + "redhatenterprisecomputenode": "rhel", # RHEL 6 ComputeNode } #: Translation table for normalizing the distro ID derived from the file name @@ -75,30 +111,61 @@ #: #: * Value: Normalized value. NORMALIZED_DISTRO_ID = { - 'redhat': 'rhel', # RHEL 6.x, 7.x + "redhat": "rhel", # RHEL 6.x, 7.x } # Pattern for content of distro release file (reversed) _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile( - r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)') + r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)" +) # Pattern for base file name of distro release file -_DISTRO_RELEASE_BASENAME_PATTERN = re.compile( - r'(\w+)[-_](release|version)$') +_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$") + +# Base file names to be looked up for if _UNIXCONFDIR is not readable. +_DISTRO_RELEASE_BASENAMES = [ + "SuSE-release", + "altlinux-release", + "arch-release", + "base-release", + "centos-release", + "fedora-release", + "gentoo-release", + "mageia-release", + "mandrake-release", + "mandriva-release", + "mandrivalinux-release", + "manjaro-release", + "oracle-release", + "redhat-release", + "rocky-release", + "sl-release", + "slackware-version", +] # Base file names to be ignored when searching for distro release file _DISTRO_RELEASE_IGNORE_BASENAMES = ( - 'debian_version', - 'lsb-release', - 'oem-release', + "debian_version", + "lsb-release", + "oem-release", _OS_RELEASE_BASENAME, - 'system-release', - 'plesk-release', + "system-release", + "plesk-release", + "iredmail-release", + "board-release", + "ec2_version", ) -def linux_distribution(full_distribution_name=True): +def linux_distribution(full_distribution_name: bool = True) -> Tuple[str, str, str]: """ + .. deprecated:: 1.6.0 + + :func:`distro.linux_distribution()` is deprecated. It should only be + used as a compatibility shim with Python's + :py:func:`platform.linux_distribution()`. Please use :func:`distro.id`, + :func:`distro.version` and :func:`distro.name` instead. + Return information about the current OS distribution as a tuple ``(id_name, version, codename)`` with items as follows: @@ -107,7 +174,8 @@ def linux_distribution(full_distribution_name=True): * ``version``: The result of :func:`distro.version`. - * ``codename``: The result of :func:`distro.codename`. + * ``codename``: The extra item (usually in parentheses) after the + os-release version number, or the result of :func:`distro.codename`. The interface of this function is compatible with the original :py:func:`platform.linux_distribution` function, supporting a subset of @@ -122,10 +190,17 @@ def linux_distribution(full_distribution_name=True): method normalizes the distro ID string to a reliable machine-readable value for a number of popular OS distributions. """ + warnings.warn( + "distro.linux_distribution() is deprecated. It should only be used as a " + "compatibility shim with Python's platform.linux_distribution(). 
Please use " + "distro.id(), distro.version() and distro.name() instead.", + DeprecationWarning, + stacklevel=2, + ) return _distro.linux_distribution(full_distribution_name) -def id(): +def id() -> str: """ Return the distro ID of the current distribution, as a machine-readable string. @@ -146,8 +221,9 @@ def id(): "fedora" Fedora "sles" SUSE Linux Enterprise Server "opensuse" openSUSE - "amazon" Amazon Linux + "amzn" Amazon Linux "arch" Arch Linux + "buildroot" Buildroot "cloudlinux" CloudLinux OS "exherbo" Exherbo Linux "gentoo" GenToo Linux @@ -167,6 +243,10 @@ def id(): "netbsd" NetBSD "freebsd" FreeBSD "midnightbsd" MidnightBSD + "rocky" Rocky Linux + "aix" AIX + "guix" Guix System + "altlinux" ALT Linux ============== ========================================= If you have a need to get distros for reliable IDs added into this set, @@ -204,7 +284,7 @@ def id(): return _distro.id() -def name(pretty=False): +def name(pretty: bool = False) -> str: """ Return the name of the current OS distribution, as a human-readable string. @@ -243,7 +323,7 @@ def name(pretty=False): return _distro.name(pretty) -def version(pretty=False, best=False): +def version(pretty: bool = False, best: bool = False) -> str: """ Return the version of the current OS distribution, as a human-readable string. @@ -259,6 +339,10 @@ def version(pretty=False, best=False): sources in a fixed priority order does not always yield the most precise version (e.g. for Debian 8.2, or CentOS 7.1). + Some other distributions may not provide this kind of information. In these + cases, an empty string would be returned. This behavior can be observed + with rolling releases distributions (e.g. Arch Linux). + The *best* parameter can be used to control the approach for the returned version: @@ -287,7 +371,7 @@ def version(pretty=False, best=False): return _distro.version(pretty, best) -def version_parts(best=False): +def version_parts(best: bool = False) -> Tuple[str, str, str]: """ Return the version of the current OS distribution as a tuple ``(major, minor, build_number)`` with items as follows: @@ -304,7 +388,7 @@ def version_parts(best=False): return _distro.version_parts(best) -def major_version(best=False): +def major_version(best: bool = False) -> str: """ Return the major version of the current OS distribution, as a string, if provided. @@ -317,7 +401,7 @@ def major_version(best=False): return _distro.major_version(best) -def minor_version(best=False): +def minor_version(best: bool = False) -> str: """ Return the minor version of the current OS distribution, as a string, if provided. @@ -330,7 +414,7 @@ def minor_version(best=False): return _distro.minor_version(best) -def build_number(best=False): +def build_number(best: bool = False) -> str: """ Return the build number of the current OS distribution, as a string, if provided. @@ -343,7 +427,7 @@ def build_number(best=False): return _distro.build_number(best) -def like(): +def like() -> str: """ Return a space-separated list of distro IDs of distributions that are closely related to the current OS distribution in regards to packaging @@ -360,7 +444,7 @@ def like(): return _distro.like() -def codename(): +def codename() -> str: """ Return the codename for the release of the current OS distribution, as a string. 
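As a quick orientation for reviewers: the type annotations added in the hunks above do not change any call signatures, so existing call sites need no updates. A minimal usage sketch of the retyped accessors (assuming the vendored copy is importable as rez.vendor.distro):

```python
# Minimal usage sketch of the accessors annotated above; the import path
# assumes the vendored copy under src/rez/vendor/distro.
from rez.vendor import distro

print(distro.id())                 # machine-readable ID, e.g. "ubuntu"
print(distro.name(pretty=True))    # human-readable name, with version
print(distro.version(best=True))   # most precise version string available
major, minor, build = distro.version_parts(best=True)
print(distro.like())               # space-separated IDs of related distros
print(distro.codename())           # may be "" on rolling releases
```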
@@ -384,7 +468,7 @@ def codename(): return _distro.codename() -def info(pretty=False, best=False): +def info(pretty: bool = False, best: bool = False) -> InfoDict: """ Return certain machine-readable information items about the current OS distribution in a dictionary, as shown in the following example: @@ -428,7 +512,7 @@ def info(pretty=False, best=False): return _distro.info(pretty, best) -def os_release_info(): +def os_release_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the os-release file data source of the current OS distribution. @@ -438,7 +522,7 @@ def os_release_info(): return _distro.os_release_info() -def lsb_release_info(): +def lsb_release_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the lsb_release command data source of the current OS distribution. @@ -449,7 +533,7 @@ def lsb_release_info(): return _distro.lsb_release_info() -def distro_release_info(): +def distro_release_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the current OS distribution. @@ -459,7 +543,7 @@ def distro_release_info(): return _distro.distro_release_info() -def uname_info(): +def uname_info() -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the current OS distribution. @@ -467,7 +551,7 @@ def uname_info(): return _distro.uname_info() -def os_release_attr(attribute): +def os_release_attr(attribute: str) -> str: """ Return a single named information item from the os-release file data source of the current OS distribution. @@ -486,7 +570,7 @@ def os_release_attr(attribute): return _distro.os_release_attr(attribute) -def lsb_release_attr(attribute): +def lsb_release_attr(attribute: str) -> str: """ Return a single named information item from the lsb_release command output data source of the current OS distribution. @@ -506,7 +590,7 @@ def lsb_release_attr(attribute): return _distro.lsb_release_attr(attribute) -def distro_release_attr(attribute): +def distro_release_attr(attribute: str) -> str: """ Return a single named information item from the distro release file data source of the current OS distribution. @@ -525,7 +609,7 @@ def distro_release_attr(attribute): return _distro.distro_release_attr(attribute) -def uname_attr(attribute): +def uname_attr(attribute: str) -> str: """ Return a single named information item from the distro release file data source of the current OS distribution. @@ -542,22 +626,27 @@ def uname_attr(attribute): return _distro.uname_attr(attribute) -class cached_property(object): - """A version of @property which caches the value. On access, it calls the - underlying function and sets the value in `__dict__` so future accesses - will not re-call the property. - """ - def __init__(self, f): - self._fname = f.__name__ - self._f = f +try: + from functools import cached_property +except ImportError: + # Python < 3.8 + class cached_property: # type: ignore + """A version of @property which caches the value. On access, it calls the + underlying function and sets the value in `__dict__` so future accesses + will not re-call the property. 
+ """ - def __get__(self, obj, owner): - assert obj is not None, 'call {} on an instance'.format(self._fname) - ret = obj.__dict__[self._fname] = self._f(obj) - return ret + def __init__(self, f: Callable[[Any], Any]) -> None: + self._fname = f.__name__ + self._f = f + def __get__(self, obj: Any, owner: Type[Any]) -> Any: + assert obj is not None, f"call {self._fname} on an instance" + ret = obj.__dict__[self._fname] = self._f(obj) + return ret -class LinuxDistribution(object): + +class LinuxDistribution: """ Provides information about a OS distribution. @@ -575,11 +664,15 @@ class LinuxDistribution(object): lsb_release command. """ - def __init__(self, - include_lsb=True, - os_release_file='', - distro_release_file='', - include_uname=True): + def __init__( + self, + include_lsb: Optional[bool] = None, + os_release_file: str = "", + distro_release_file: str = "", + include_uname: Optional[bool] = None, + root_dir: Optional[str] = None, + include_oslevel: Optional[bool] = None, + ) -> None: """ The initialization method of this class gathers information from the available data sources, and stores that in private instance attributes. @@ -618,6 +711,15 @@ def __init__(self, the program execution path the data source for the uname command will be empty. + * ``root_dir`` (string): The absolute path to the root directory to use + to find distro-related information files. Note that ``include_*`` + parameters must not be enabled in combination with ``root_dir``. + + * ``include_oslevel`` (bool): Controls whether (AIX) oslevel command + output is included as a data source. If the oslevel command is not + available in the program execution path the data source will be + empty. + Public instance attributes: * ``os_release_file`` (string): The path name of the @@ -635,40 +737,86 @@ def __init__(self, parameter. This controls whether the uname information will be loaded. + * ``include_oslevel`` (bool): The result of the ``include_oslevel`` + parameter. This controls whether (AIX) oslevel information will be + loaded. + + * ``root_dir`` (string): The result of the ``root_dir`` parameter. + The absolute path to the root directory to use to find distro-related + information files. + Raises: - * :py:exc:`IOError`: Some I/O issue with an os-release file or distro - release file. + * :py:exc:`ValueError`: Initialization parameters combination is not + supported. - * :py:exc:`subprocess.CalledProcessError`: The lsb_release command had - some issue (other than not being available in the program execution - path). + * :py:exc:`OSError`: Some I/O issue with an os-release file or distro + release file. * :py:exc:`UnicodeError`: A data source has unexpected characters or uses an unexpected encoding. 
""" - self.os_release_file = os_release_file or \ - os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME) - self.distro_release_file = distro_release_file or '' # updated later - self.include_lsb = include_lsb - self.include_uname = include_uname - - def __repr__(self): - """Return repr of all info - """ - return \ - "LinuxDistribution(" \ - "os_release_file={self.os_release_file!r}, " \ - "distro_release_file={self.distro_release_file!r}, " \ - "include_lsb={self.include_lsb!r}, " \ - "include_uname={self.include_uname!r}, " \ - "_os_release_info={self._os_release_info!r}, " \ - "_lsb_release_info={self._lsb_release_info!r}, " \ - "_distro_release_info={self._distro_release_info!r}, " \ - "_uname_info={self._uname_info!r})".format( - self=self) - - def linux_distribution(self, full_distribution_name=True): + self.root_dir = root_dir + self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR + self.usr_lib_dir = ( + os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR + ) + + if os_release_file: + self.os_release_file = os_release_file + else: + etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME) + usr_lib_os_release_file = os.path.join( + self.usr_lib_dir, _OS_RELEASE_BASENAME + ) + + # NOTE: The idea is to respect order **and** have it set + # at all times for API backwards compatibility. + if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile( + usr_lib_os_release_file + ): + self.os_release_file = etc_dir_os_release_file + else: + self.os_release_file = usr_lib_os_release_file + + self.distro_release_file = distro_release_file or "" # updated later + + is_root_dir_defined = root_dir is not None + if is_root_dir_defined and (include_lsb or include_uname or include_oslevel): + raise ValueError( + "Including subprocess data sources from specific root_dir is disallowed" + " to prevent false information" + ) + self.include_lsb = ( + include_lsb if include_lsb is not None else not is_root_dir_defined + ) + self.include_uname = ( + include_uname if include_uname is not None else not is_root_dir_defined + ) + self.include_oslevel = ( + include_oslevel if include_oslevel is not None else not is_root_dir_defined + ) + + def __repr__(self) -> str: + """Return repr of all info""" + return ( + "LinuxDistribution(" + "os_release_file={self.os_release_file!r}, " + "distro_release_file={self.distro_release_file!r}, " + "include_lsb={self.include_lsb!r}, " + "include_uname={self.include_uname!r}, " + "include_oslevel={self.include_oslevel!r}, " + "root_dir={self.root_dir!r}, " + "_os_release_info={self._os_release_info!r}, " + "_lsb_release_info={self._lsb_release_info!r}, " + "_distro_release_info={self._distro_release_info!r}, " + "_uname_info={self._uname_info!r}, " + "_oslevel_info={self._oslevel_info!r})".format(self=self) + ) + + def linux_distribution( + self, full_distribution_name: bool = True + ) -> Tuple[str, str, str]: """ Return information about the OS distribution that is compatible with Python's :func:`platform.linux_distribution`, supporting a subset @@ -679,92 +827,103 @@ def linux_distribution(self, full_distribution_name=True): return ( self.name() if full_distribution_name else self.id(), self.version(), - self.codename() + self._os_release_info.get("release_codename") or self.codename(), ) - def id(self): + def id(self) -> str: """Return the distro ID of the OS distribution, as a string. For details, see :func:`distro.id`. 
""" - def normalize(distro_id, table): - distro_id = distro_id.lower().replace(' ', '_') + + def normalize(distro_id: str, table: Dict[str, str]) -> str: + distro_id = distro_id.lower().replace(" ", "_") return table.get(distro_id, distro_id) - distro_id = self.os_release_attr('id') + distro_id = self.os_release_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_OS_ID) - distro_id = self.lsb_release_attr('distributor_id') + distro_id = self.lsb_release_attr("distributor_id") if distro_id: return normalize(distro_id, NORMALIZED_LSB_ID) - distro_id = self.distro_release_attr('id') + distro_id = self.distro_release_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_DISTRO_ID) - distro_id = self.uname_attr('id') + distro_id = self.uname_attr("id") if distro_id: return normalize(distro_id, NORMALIZED_DISTRO_ID) - return '' + return "" - def name(self, pretty=False): + def name(self, pretty: bool = False) -> str: """ Return the name of the OS distribution, as a string. For details, see :func:`distro.name`. """ - name = self.os_release_attr('name') \ - or self.lsb_release_attr('distributor_id') \ - or self.distro_release_attr('name') \ - or self.uname_attr('name') + name = ( + self.os_release_attr("name") + or self.lsb_release_attr("distributor_id") + or self.distro_release_attr("name") + or self.uname_attr("name") + ) if pretty: - name = self.os_release_attr('pretty_name') \ - or self.lsb_release_attr('description') + name = self.os_release_attr("pretty_name") or self.lsb_release_attr( + "description" + ) if not name: - name = self.distro_release_attr('name') \ - or self.uname_attr('name') + name = self.distro_release_attr("name") or self.uname_attr("name") version = self.version(pretty=True) if version: - name = name + ' ' + version - return name or '' + name = f"{name} {version}" + return name or "" - def version(self, pretty=False, best=False): + def version(self, pretty: bool = False, best: bool = False) -> str: """ Return the version of the OS distribution, as a string. For details, see :func:`distro.version`. """ versions = [ - self.os_release_attr('version_id'), - self.lsb_release_attr('release'), - self.distro_release_attr('version_id'), - self._parse_distro_release_content( - self.os_release_attr('pretty_name')).get('version_id', ''), + self.os_release_attr("version_id"), + self.lsb_release_attr("release"), + self.distro_release_attr("version_id"), + self._parse_distro_release_content(self.os_release_attr("pretty_name")).get( + "version_id", "" + ), self._parse_distro_release_content( - self.lsb_release_attr('description')).get('version_id', ''), - self.uname_attr('release') + self.lsb_release_attr("description") + ).get("version_id", ""), + self.uname_attr("release"), ] - version = '' + if self.uname_attr("id").startswith("aix"): + # On AIX platforms, prefer oslevel command output. + versions.insert(0, self.oslevel_info()) + elif self.id() == "debian" or "debian" in self.like().split(): + # On Debian-like, add debian_version file content to candidates list. + versions.append(self._debian_version) + version = "" if best: # This algorithm uses the last version in priority order that has # the best precision. If the versions are not in conflict, that # does not matter; otherwise, using the last one instead of the # first one might be considered a surprise. 
for v in versions: - if v.count(".") > version.count(".") or version == '': + if v.count(".") > version.count(".") or version == "": version = v else: for v in versions: - if v != '': + if v != "": version = v break if pretty and version and self.codename(): - version = '{0} ({1})'.format(version, self.codename()) + version = f"{version} ({self.codename()})" return version - def version_parts(self, best=False): + def version_parts(self, best: bool = False) -> Tuple[str, str, str]: """ Return the version of the OS distribution, as a tuple of version numbers. @@ -773,14 +932,14 @@ def version_parts(self, best=False): """ version_str = self.version(best=best) if version_str: - version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?') + version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?") matches = version_regex.match(version_str) if matches: major, minor, build_number = matches.groups() - return major, minor or '', build_number or '' - return '', '', '' + return major, minor or "", build_number or "" + return "", "", "" - def major_version(self, best=False): + def major_version(self, best: bool = False) -> str: """ Return the major version number of the current distribution. @@ -788,7 +947,7 @@ def major_version(self, best=False): """ return self.version_parts(best)[0] - def minor_version(self, best=False): + def minor_version(self, best: bool = False) -> str: """ Return the minor version number of the current distribution. @@ -796,7 +955,7 @@ def minor_version(self, best=False): """ return self.version_parts(best)[1] - def build_number(self, best=False): + def build_number(self, best: bool = False) -> str: """ Return the build number of the current distribution. @@ -804,15 +963,15 @@ def build_number(self, best=False): """ return self.version_parts(best)[2] - def like(self): + def like(self) -> str: """ Return the IDs of distributions that are like the OS distribution. For details, see :func:`distro.like`. """ - return self.os_release_attr('id_like') or '' + return self.os_release_attr("id_like") or "" - def codename(self): + def codename(self) -> str: """ Return the codename of the OS distribution. @@ -821,32 +980,34 @@ def codename(self): try: # Handle os_release specially since distros might purposefully set # this to empty string to have no codename - return self._os_release_info['codename'] + return self._os_release_info["codename"] except KeyError: - return self.lsb_release_attr('codename') \ - or self.distro_release_attr('codename') \ - or '' + return ( + self.lsb_release_attr("codename") + or self.distro_release_attr("codename") + or "" + ) - def info(self, pretty=False, best=False): + def info(self, pretty: bool = False, best: bool = False) -> InfoDict: """ Return certain machine-readable information about the OS distribution. For details, see :func:`distro.info`. """ - return dict( + return InfoDict( id=self.id(), version=self.version(pretty, best), - version_parts=dict( + version_parts=VersionDict( major=self.major_version(best), minor=self.minor_version(best), - build_number=self.build_number(best) + build_number=self.build_number(best), ), like=self.like(), codename=self.codename(), ) - def os_release_info(self): + def os_release_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the os-release file data source of the OS distribution. 
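Editor's note: the `best=True` branch above keeps the candidate with the most dot-separated components; the first candidate seen at the highest precision wins. The same logic in isolation:

```python
def best_version(candidates):
    version = ""
    for v in candidates:
        # More dots == higher precision; on a tie, the earlier
        # (higher-priority) source is kept.
        if v.count(".") > version.count(".") or version == "":
            version = v
    return version

print(best_version(["7", "7.1.1503", "7.1"]))  # -> "7.1.1503"
```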
@@ -855,7 +1016,7 @@ def os_release_info(self): """ return self._os_release_info - def lsb_release_info(self): + def lsb_release_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the lsb_release command data source of the OS @@ -865,7 +1026,7 @@ def lsb_release_info(self): """ return self._lsb_release_info - def distro_release_info(self): + def distro_release_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the distro release file data source of the OS @@ -875,7 +1036,7 @@ def distro_release_info(self): """ return self._distro_release_info - def uname_info(self): + def uname_info(self) -> Dict[str, str]: """ Return a dictionary containing key-value pairs for the information items from the uname command data source of the OS distribution. @@ -884,44 +1045,50 @@ def uname_info(self): """ return self._uname_info - def os_release_attr(self, attribute): + def oslevel_info(self) -> str: + """ + Return AIX' oslevel command output. + """ + return self._oslevel_info + + def os_release_attr(self, attribute: str) -> str: """ Return a single named information item from the os-release file data source of the OS distribution. For details, see :func:`distro.os_release_attr`. """ - return self._os_release_info.get(attribute, '') + return self._os_release_info.get(attribute, "") - def lsb_release_attr(self, attribute): + def lsb_release_attr(self, attribute: str) -> str: """ Return a single named information item from the lsb_release command output data source of the OS distribution. For details, see :func:`distro.lsb_release_attr`. """ - return self._lsb_release_info.get(attribute, '') + return self._lsb_release_info.get(attribute, "") - def distro_release_attr(self, attribute): + def distro_release_attr(self, attribute: str) -> str: """ Return a single named information item from the distro release file data source of the OS distribution. For details, see :func:`distro.distro_release_attr`. """ - return self._distro_release_info.get(attribute, '') + return self._distro_release_info.get(attribute, "") - def uname_attr(self, attribute): + def uname_attr(self, attribute: str) -> str: """ Return a single named information item from the uname command output data source of the OS distribution. - For details, see :func:`distro.uname_release_attr`. + For details, see :func:`distro.uname_attr`. """ - return self._uname_info.get(attribute, '') + return self._uname_info.get(attribute, "") @cached_property - def _os_release_info(self): + def _os_release_info(self) -> Dict[str, str]: """ Get the information items from the specified os-release file. @@ -929,12 +1096,12 @@ def _os_release_info(self): A dictionary containing all information items. """ if os.path.isfile(self.os_release_file): - with open(self.os_release_file) as release_file: + with open(self.os_release_file, encoding="utf-8") as release_file: return self._parse_os_release_content(release_file) return {} @staticmethod - def _parse_os_release_content(lines): + def _parse_os_release_content(lines: TextIO) -> Dict[str, str]: """ Parse the lines of an os-release file. @@ -951,16 +1118,6 @@ def _parse_os_release_content(lines): lexer = shlex.shlex(lines, posix=True) lexer.whitespace_split = True - # The shlex module defines its `wordchars` variable using literals, - # making it dependent on the encoding of the Python source file. 
- # In Python 2.6 and 2.7, the shlex source file is encoded in - # 'iso-8859-1', and the `wordchars` variable is defined as a byte - # string. This causes a UnicodeDecodeError to be raised when the - # parsed content is a unicode object. The following fix resolves that - # (... but it should be fixed in shlex...): - if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes): - lexer.wordchars = lexer.wordchars.decode('iso-8859-1') - tokens = list(lexer) for token in tokens: # At this point, all shell-like parsing has been done (i.e. @@ -969,37 +1126,32 @@ def _parse_os_release_content(lines): # stripped, etc.), so the tokens are now either: # * variable assignments: var=value # * commands or their arguments (not allowed in os-release) - if '=' in token: - k, v = token.split('=', 1) + # Ignore any tokens that are not variable assignments + if "=" in token: + k, v = token.split("=", 1) props[k.lower()] = v - else: - # Ignore any tokens that are not variable assignments - pass - if 'version_codename' in props: + if "version" in props: + # extract release codename (if any) from version attribute + match = re.search(r"\((\D+)\)|,\s*(\D+)", props["version"]) + if match: + release_codename = match.group(1) or match.group(2) + props["codename"] = props["release_codename"] = release_codename + + if "version_codename" in props: # os-release added a version_codename field. Use that in # preference to anything else Note that some distros purposefully # do not have code names. They should be setting # version_codename="" - props['codename'] = props['version_codename'] - elif 'ubuntu_codename' in props: + props["codename"] = props["version_codename"] + elif "ubuntu_codename" in props: # Same as above but a non-standard field name used on older Ubuntus - props['codename'] = props['ubuntu_codename'] - elif 'version' in props: - # If there is no version_codename, parse it from the version - codename = re.search(r'(\(\D+\))|,(\s+)?\D+', props['version']) - if codename: - codename = codename.group() - codename = codename.strip('()') - codename = codename.strip(',') - codename = codename.strip() - # codename appears within paranthese. - props['codename'] = codename + props["codename"] = props["ubuntu_codename"] return props @cached_property - def _lsb_release_info(self): + def _lsb_release_info(self) -> Dict[str, str]: """ Get the information items from the lsb_release command output. @@ -1008,17 +1160,17 @@ def _lsb_release_info(self): """ if not self.include_lsb: return {} - with open(os.devnull, 'w') as devnull: - try: - cmd = ('lsb_release', '-a') - stdout = subprocess.check_output(cmd, stderr=devnull) - except OSError: # Command not found - return {} + try: + cmd = ("lsb_release", "-a") + stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) + # Command not found or lsb_release returned error + except (OSError, subprocess.CalledProcessError): + return {} content = self._to_str(stdout).splitlines() return self._parse_lsb_release_content(content) @staticmethod - def _parse_lsb_release_content(lines): + def _parse_lsb_release_content(lines: Iterable[str]) -> Dict[str, str]: """ Parse the output of the lsb_release command. @@ -1033,58 +1185,72 @@ def _parse_lsb_release_content(lines): """ props = {} for line in lines: - kv = line.strip('\n').split(':', 1) + kv = line.strip("\n").split(":", 1) if len(kv) != 2: # Ignore lines without colon. 
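Editor's note: a condensed sketch of the os-release parsing shown above. The real method also honours `version_codename`/`ubuntu_codename` and records `release_codename`; this keeps only the shlex lexing and the parenthesised-codename fallback:

```python
import io
import re
import shlex

def parse_os_release(text):
    # Shell-like lexing handles quoting and '#' comments; only
    # var=value tokens are kept, as in _parse_os_release_content().
    lexer = shlex.shlex(io.StringIO(text), posix=True)
    lexer.whitespace_split = True
    props = {}
    for token in lexer:
        if "=" in token:
            k, v = token.split("=", 1)
            props[k.lower()] = v
    # Codename fallback: the parenthesised word after the version.
    if "codename" not in props and "version" in props:
        match = re.search(r"\((\D+)\)|,\s*(\D+)", props["version"])
        if match:
            props["codename"] = match.group(1) or match.group(2)
    return props

sample = 'NAME="Debian GNU/Linux"\nVERSION="12 (bookworm)"\nID=debian\n'
print(parse_os_release(sample))
# {'name': 'Debian GNU/Linux', 'version': '12 (bookworm)',
#  'id': 'debian', 'codename': 'bookworm'}
```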
continue k, v = kv - props.update({k.replace(' ', '_').lower(): v.strip()}) + props.update({k.replace(" ", "_").lower(): v.strip()}) return props @cached_property - def _uname_info(self): - with open(os.devnull, 'w') as devnull: - try: - cmd = ('uname', '-rs') - stdout = subprocess.check_output(cmd, stderr=devnull) - except OSError: - return {} + def _uname_info(self) -> Dict[str, str]: + if not self.include_uname: + return {} + try: + cmd = ("uname", "-rs") + stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL) + except OSError: + return {} content = self._to_str(stdout).splitlines() return self._parse_uname_content(content) + @cached_property + def _oslevel_info(self) -> str: + if not self.include_oslevel: + return "" + try: + stdout = subprocess.check_output("oslevel", stderr=subprocess.DEVNULL) + except (OSError, subprocess.CalledProcessError): + return "" + return self._to_str(stdout).strip() + + @cached_property + def _debian_version(self) -> str: + try: + with open( + os.path.join(self.etc_dir, "debian_version"), encoding="ascii" + ) as fp: + return fp.readline().rstrip() + except FileNotFoundError: + return "" + @staticmethod - def _parse_uname_content(lines): + def _parse_uname_content(lines: Sequence[str]) -> Dict[str, str]: + if not lines: + return {} props = {} - match = re.search(r'^([^\s]+)\s+([\d\.]+)', lines[0].strip()) + match = re.search(r"^([^\s]+)\s+([\d\.]+)", lines[0].strip()) if match: name, version = match.groups() # This is to prevent the Linux kernel version from # appearing as the 'best' version on otherwise # identifiable distributions. - if name == 'Linux': + if name == "Linux": return {} - props['id'] = name.lower() - props['name'] = name - props['release'] = version + props["id"] = name.lower() + props["name"] = name + props["release"] = version return props @staticmethod - def _to_str(text): + def _to_str(bytestring: bytes) -> str: encoding = sys.getfilesystemencoding() - encoding = 'utf-8' if encoding == 'ascii' else encoding - - if sys.version_info[0] >= 3: - if isinstance(text, bytes): - return text.decode(encoding) - else: - if isinstance(text, unicode): # noqa - return text.encode(encoding) - - return text + return bytestring.decode(encoding) @cached_property - def _distro_release_info(self): + def _distro_release_info(self) -> Dict[str, str]: """ Get the information items from the specified distro release file. @@ -1094,23 +1260,21 @@ def _distro_release_info(self): if self.distro_release_file: # If it was specified, we use it and parse what we can, even if # its file name or content does not match the expected pattern. - distro_info = self._parse_distro_release_file( - self.distro_release_file) + distro_info = self._parse_distro_release_file(self.distro_release_file) basename = os.path.basename(self.distro_release_file) # The file name pattern for user-specified distro release files # is somewhat more tolerant (compared to when searching for the # file), because we want to use what was specified as best as # possible. 
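Editor's note: the subprocess-backed sources above (`_lsb_release_info`, `_oslevel_info`, and largely `_uname_info`, which still swallows only `OSError`) share one defensive shape: `DEVNULL` for stderr, treat a missing command and a failing command alike, decode with the filesystem encoding. Condensed:

```python
import subprocess
import sys

def command_output(*cmd):
    try:
        stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
    except (OSError, subprocess.CalledProcessError):
        # Command absent from PATH, or it ran and failed: either way
        # this data source is simply empty.
        return ""
    return stdout.decode(sys.getfilesystemencoding()).strip()

print(command_output("uname", "-rs") or "<unavailable>")
```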
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) - if 'name' in distro_info \ - and 'cloudlinux' in distro_info['name'].lower(): - distro_info['id'] = 'cloudlinux' - elif match: - distro_info['id'] = match.group(1) - return distro_info else: try: - basenames = os.listdir(_UNIXCONFDIR) + basenames = [ + basename + for basename in os.listdir(self.etc_dir) + if basename not in _DISTRO_RELEASE_IGNORE_BASENAMES + and os.path.isfile(os.path.join(self.etc_dir, basename)) + ] # We sort for repeatability in cases where there are multiple # distro specific files; e.g. CentOS, Oracle, Enterprise all # containing `redhat-release` on top of their own. @@ -1120,38 +1284,31 @@ def _distro_release_info(self): # sure about the *-release files. Check common entries of # /etc for information. If they turn out to not be there the # error is handled in `_parse_distro_release_file()`. - basenames = ['SuSE-release', - 'arch-release', - 'base-release', - 'centos-release', - 'fedora-release', - 'gentoo-release', - 'mageia-release', - 'mandrake-release', - 'mandriva-release', - 'mandrivalinux-release', - 'manjaro-release', - 'oracle-release', - 'redhat-release', - 'sl-release', - 'slackware-version'] + basenames = _DISTRO_RELEASE_BASENAMES for basename in basenames: - if basename in _DISTRO_RELEASE_IGNORE_BASENAMES: - continue match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename) - if match: - filepath = os.path.join(_UNIXCONFDIR, basename) - distro_info = self._parse_distro_release_file(filepath) - if 'name' in distro_info: - # The name is always present if the pattern matches - self.distro_release_file = filepath - distro_info['id'] = match.group(1) - if 'cloudlinux' in distro_info['name'].lower(): - distro_info['id'] = 'cloudlinux' - return distro_info - return {} + if match is None: + continue + filepath = os.path.join(self.etc_dir, basename) + distro_info = self._parse_distro_release_file(filepath) + # The name is always present if the pattern matches. + if "name" not in distro_info: + continue + self.distro_release_file = filepath + break + else: # the loop didn't "break": no candidate. + return {} - def _parse_distro_release_file(self, filepath): + if match is not None: + distro_info["id"] = match.group(1) + + # CloudLinux < 7: manually enrich info with proper id. + if "cloudlinux" in distro_info.get("name", "").lower(): + distro_info["id"] = "cloudlinux" + + return distro_info + + def _parse_distro_release_file(self, filepath: str) -> Dict[str, str]: """ Parse a distro release file. @@ -1163,18 +1320,18 @@ def _parse_distro_release_file(self, filepath): A dictionary containing all information items. """ try: - with open(filepath) as fp: + with open(filepath, encoding="utf-8") as fp: # Only parse the first line. For instance, on SLES there # are multiple lines. We don't want them... return self._parse_distro_release_content(fp.readline()) - except (OSError, IOError): + except OSError: # Ignore not being able to read a specific, seemingly version # related file. - # See https://github.com/nir0s/distro/issues/162 + # See https://github.com/python-distro/distro/issues/162 return {} @staticmethod - def _parse_distro_release_content(line): + def _parse_distro_release_content(line: str) -> Dict[str, str]: """ Parse a line from a distro release file. @@ -1185,46 +1342,62 @@ def _parse_distro_release_content(line): Returns: A dictionary containing all information items. 
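Editor's note: the content parser referenced above runs its regex against the *reversed* line, which makes the trailing "(codename)" and version easy to anchor ("STL" and "esaeler" in the pattern are "LTS" and "release" reversed). Standalone:

```python
import re

_REVERSED = re.compile(
    r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)"
)

def parse_release_line(line):
    m = _REVERSED.match(line.strip()[::-1])  # match from the end
    if not m:
        return {}
    info = {"name": m.group(3)[::-1]}
    if m.group(2):
        info["version_id"] = m.group(2)[::-1]
    if m.group(1):
        info["codename"] = m.group(1)[::-1]
    return info

print(parse_release_line("CentOS Linux release 7.1.1503 (Core)"))
# {'name': 'CentOS Linux', 'version_id': '7.1.1503', 'codename': 'Core'}
```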
""" - matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match( - line.strip()[::-1]) + matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1]) distro_info = {} if matches: # regexp ensures non-None - distro_info['name'] = matches.group(3)[::-1] + distro_info["name"] = matches.group(3)[::-1] if matches.group(2): - distro_info['version_id'] = matches.group(2)[::-1] + distro_info["version_id"] = matches.group(2)[::-1] if matches.group(1): - distro_info['codename'] = matches.group(1)[::-1] + distro_info["codename"] = matches.group(1)[::-1] elif line: - distro_info['name'] = line.strip() + distro_info["name"] = line.strip() return distro_info _distro = LinuxDistribution() -def main(): +def main() -> None: logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) logger.addHandler(logging.StreamHandler(sys.stdout)) parser = argparse.ArgumentParser(description="OS distro info tool") parser.add_argument( - '--json', - '-j', - help="Output in machine readable format", - action="store_true") + "--json", "-j", help="Output in machine readable format", action="store_true" + ) + + parser.add_argument( + "--root-dir", + "-r", + type=str, + dest="root_dir", + help="Path to the root filesystem directory (defaults to /)", + ) + args = parser.parse_args() + if args.root_dir: + dist = LinuxDistribution( + include_lsb=False, + include_uname=False, + include_oslevel=False, + root_dir=args.root_dir, + ) + else: + dist = _distro + if args.json: - logger.info(json.dumps(info(), indent=4, sort_keys=True)) + logger.info(json.dumps(dist.info(), indent=4, sort_keys=True)) else: - logger.info('Name: %s', name(pretty=True)) - distribution_version = version(pretty=True) - logger.info('Version: %s', distribution_version) - distribution_codename = codename() - logger.info('Codename: %s', distribution_codename) + logger.info("Name: %s", dist.name(pretty=True)) + distribution_version = dist.version(pretty=True) + logger.info("Version: %s", distribution_version) + distribution_codename = dist.codename() + logger.info("Codename: %s", distribution_codename) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/src/rez/vendor/attr/py.typed b/src/rez/vendor/distro/py.typed similarity index 100% rename from src/rez/vendor/attr/py.typed rename to src/rez/vendor/distro/py.typed diff --git a/src/rez/vendor/progress/LICENSE b/src/rez/vendor/progress/LICENSE index 059cc0566..fb3ac14a6 100644 --- a/src/rez/vendor/progress/LICENSE +++ b/src/rez/vendor/progress/LICENSE @@ -1,4 +1,4 @@ -# Copyright (c) 2012 Giorgos Verigakis +# Copyright (c) 2012 Georgios Verigakis # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above diff --git a/src/rez/vendor/progress/__init__.py b/src/rez/vendor/progress/__init__.py index e434c257f..b434b300a 100644 --- a/src/rez/vendor/progress/__init__.py +++ b/src/rez/vendor/progress/__init__.py @@ -1,4 +1,4 @@ -# Copyright (c) 2012 Giorgos Verigakis +# Copyright (c) 2012 Georgios Verigakis # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above @@ -24,7 +24,7 @@ from time import time as monotonic -__version__ = '1.5' +__version__ = '1.6' HIDE_CURSOR = '\x1b[?25l' SHOW_CURSOR = '\x1b[?25h' @@ -46,14 +46,19 @@ def __init__(self, message='', **kwargs): for key, val in kwargs.items(): setattr(self, key, val) - self._width = 0 + self._max_width = 0 + self._hidden_cursor = False 
self.message = message if self.file and self.is_tty(): if self.hide_cursor: print(HIDE_CURSOR, end='', file=self.file) - print(self.message, end='', file=self.file) - self.file.flush() + self._hidden_cursor = True + self.writeln('') + + def __del__(self): + if self._hidden_cursor: + print(SHOW_CURSOR, end='', file=self.file) def __getitem__(self, key): if key.startswith('_'): @@ -85,31 +90,30 @@ def update(self): def start(self): pass - def clearln(self): - if self.file and self.is_tty(): - print('\r\x1b[K', end='', file=self.file) - - def write(self, s): - if self.file and self.is_tty(): - line = self.message + s.ljust(self._width) - print('\r' + line, end='', file=self.file) - self._width = max(self._width, len(s)) - self.file.flush() - def writeln(self, line): if self.file and self.is_tty(): - self.clearln() - print(line, end='', file=self.file) + width = len(line) + if width < self._max_width: + # Add padding to cover previous contents + line += ' ' * (self._max_width - width) + else: + self._max_width = width + print('\r' + line, end='', file=self.file) self.file.flush() def finish(self): if self.file and self.is_tty(): print(file=self.file) - if self.hide_cursor: + if self._hidden_cursor: print(SHOW_CURSOR, end='', file=self.file) + self._hidden_cursor = False def is_tty(self): - return self.file.isatty() if self.check_tty else True + try: + return self.file.isatty() if self.check_tty else True + except AttributeError: + msg = "%s has no attribute 'isatty'. Try setting check_tty=False." % self + raise AttributeError(msg) def next(self, n=1): now = monotonic() @@ -120,10 +124,13 @@ def next(self, n=1): self.update() def iter(self, it): + self.iter_value = None with self: for x in it: + self.iter_value = x yield x self.next() + del self.iter_value def __enter__(self): self.start() @@ -152,6 +159,8 @@ def percent(self): @property def progress(self): + if self.max == 0: + return 0 return min(1, self.index / self.max) @property @@ -171,7 +180,10 @@ def iter(self, it): except TypeError: pass + self.iter_value = None with self: for x in it: + self.iter_value = x yield x self.next() + del self.iter_value diff --git a/src/rez/vendor/progress/bar.py b/src/rez/vendor/progress/bar.py index 8819efda6..df4e8b61f 100644 --- a/src/rez/vendor/progress/bar.py +++ b/src/rez/vendor/progress/bar.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (c) 2012 Giorgos Verigakis +# Copyright (c) 2012 Georgios Verigakis # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above @@ -19,6 +19,7 @@ import sys from . 
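Editor's note: the rewritten `writeln()` above drops the `'\r\x1b[K'` erase sequence in favour of plain space padding tracked via `_max_width`, so a shorter line still overwrites leftovers from a longer one. The idea in isolation:

```python
import sys

_max_width = 0

def writeln(line, file=sys.stdout):
    global _max_width
    width = len(line)
    if width < _max_width:
        # Pad with spaces to cover the tail of a longer previous line.
        line += " " * (_max_width - width)
    else:
        _max_width = width
    print("\r" + line, end="", file=file)
    file.flush()

writeln("Processing ##########")
writeln("Processing #####")  # shorter, yet fully overwrites the old line
print()
```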
import Progress +from .colors import color class Bar(Progress): @@ -28,13 +29,14 @@ class Bar(Progress): bar_suffix = '| ' empty_fill = ' ' fill = '#' + color = None def update(self): filled_length = int(self.width * self.progress) empty_length = self.width - filled_length message = self.message % self - bar = self.fill * filled_length + bar = color(self.fill * filled_length, fg=self.color) empty = self.empty_fill * empty_length suffix = self.suffix % self line = ''.join([message, self.bar_prefix, bar, empty, self.bar_suffix, @@ -74,7 +76,7 @@ def update(self): nempty = self.width - nfull # Number of empty chars message = self.message % self - bar = self.phases[-1] * nfull + bar = color(self.phases[-1] * nfull, fg=self.color) current = self.phases[phase] if phase > 0 else '' empty = self.empty_fill * max(0, nempty - len(current)) suffix = self.suffix % self diff --git a/src/rez/vendor/progress/colors.py b/src/rez/vendor/progress/colors.py new file mode 100644 index 000000000..4e770f868 --- /dev/null +++ b/src/rez/vendor/progress/colors.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2020 Georgios Verigakis +# +# Permission to use, copy, modify, and distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ +from functools import partial + + +COLORS = ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', + 'white') +STYLES = ('bold', 'faint', 'italic', 'underline', 'blink', 'blink2', + 'negative', 'concealed', 'crossed') + + +def color(s, fg=None, bg=None, style=None): + sgr = [] + + if fg: + if fg in COLORS: + sgr.append(str(30 + COLORS.index(fg))) + elif isinstance(fg, int) and 0 <= fg <= 255: + sgr.append('38;5;%d' % int(fg)) + else: + raise Exception('Invalid color "%s"' % fg) + + if bg: + if bg in COLORS: + sgr.append(str(40 + COLORS.index(bg))) + elif isinstance(bg, int) and 0 <= bg <= 255: + sgr.append('48;5;%d' % bg) + else: + raise Exception('Invalid color "%s"' % bg) + + if style: + for st in style.split('+'): + if st in STYLES: + sgr.append(str(1 + STYLES.index(st))) + else: + raise Exception('Invalid style "%s"' % st) + + if sgr: + prefix = '\x1b[' + ';'.join(sgr) + 'm' + suffix = '\x1b[0m' + return prefix + s + suffix + else: + return s + + +# Foreground shortcuts +black = partial(color, fg='black') +red = partial(color, fg='red') +green = partial(color, fg='green') +yellow = partial(color, fg='yellow') +blue = partial(color, fg='blue') +magenta = partial(color, fg='magenta') +cyan = partial(color, fg='cyan') +white = partial(color, fg='white') + +# Style shortcuts +bold = partial(color, style='bold') +faint = partial(color, style='faint') +italic = partial(color, style='italic') +underline = partial(color, style='underline') +blink = partial(color, style='blink') +blink2 = partial(color, style='blink2') +negative = partial(color, style='negative') +concealed = partial(color, style='concealed') +crossed = partial(color, style='crossed') diff --git a/src/rez/vendor/progress/counter.py b/src/rez/vendor/progress/counter.py index d955ca477..d0fbe7ef3 100644 --- a/src/rez/vendor/progress/counter.py +++ b/src/rez/vendor/progress/counter.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (c) 2012 Giorgos Verigakis +# Copyright (c) 2012 Georgios Verigakis # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above @@ -20,12 +20,16 @@ class Counter(Infinite): def update(self): - self.write(str(self.index)) + message = self.message % self + line = ''.join([message, str(self.index)]) + self.writeln(line) class Countdown(Progress): def update(self): - self.write(str(self.remaining)) + message = self.message % self + line = ''.join([message, str(self.remaining)]) + self.writeln(line) class Stack(Progress): @@ -34,7 +38,9 @@ class Stack(Progress): def update(self): nphases = len(self.phases) i = min(nphases - 1, int(self.progress * nphases)) - self.write(self.phases[i]) + message = self.message % self + line = ''.join([message, self.phases[i]]) + self.writeln(line) class Pie(Stack): diff --git a/src/rez/vendor/progress/helpers.py b/src/rez/vendor/progress/helpers.py deleted file mode 100644 index 9ed90b2bc..000000000 --- a/src/rez/vendor/progress/helpers.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright (c) 2012 Giorgos Verigakis -# -# Permission to use, copy, modify, and distribute this software for any -# purpose with or without fee is hereby granted, provided that the above -# copyright notice and this permission notice appear in all copies. -# -# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -# MERCHANTABILITY AND FITNESS. 
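Editor's note: `color()` above assembles an SGR escape sequence from numeric codes (30 + index into `COLORS` for named foregrounds, `38;5;N` for 256-color values) and appends a reset. A minimal illustration of what it emits:

```python
def sgr(text, *codes):
    # SGR prefix, text, reset suffix -- the same shape color() returns.
    return "\x1b[%sm%s\x1b[0m" % (";".join(str(c) for c in codes), text)

print(sgr("done", 32))             # 30 + COLORS.index("green")
print(sgr("warning", 38, 5, 208))  # 256-color foreground, per '38;5;%d'
print(sgr("important", 1))         # 1 + STYLES.index("bold")
```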
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -from __future__ import print_function - - -HIDE_CURSOR = '\x1b[?25l' -SHOW_CURSOR = '\x1b[?25h' - - -class WriteMixin(object): - hide_cursor = False - - def __init__(self, message=None, **kwargs): - super(WriteMixin, self).__init__(**kwargs) - self._width = 0 - if message: - self.message = message - - if self.file.isatty(): - if self.hide_cursor: - print(HIDE_CURSOR, end='', file=self.file) - print(self.message, end='', file=self.file) - self.file.flush() - - def write(self, s): - if self.file.isatty(): - b = '\b' * self._width - c = s.ljust(self._width) - print(b + c, end='', file=self.file) - self._width = max(self._width, len(s)) - self.file.flush() - - def finish(self): - if self.file.isatty() and self.hide_cursor: - print(SHOW_CURSOR, end='', file=self.file) - - -class WritelnMixin(object): - hide_cursor = False - - def __init__(self, message=None, **kwargs): - super(WritelnMixin, self).__init__(**kwargs) - if message: - self.message = message - - if self.file.isatty() and self.hide_cursor: - print(HIDE_CURSOR, end='', file=self.file) - - def clearln(self): - if self.file.isatty(): - print('\r\x1b[K', end='', file=self.file) - - def writeln(self, line): - if self.file.isatty(): - self.clearln() - print(line, end='', file=self.file) - self.file.flush() - - def finish(self): - if self.file.isatty(): - print(file=self.file) - if self.hide_cursor: - print(SHOW_CURSOR, end='', file=self.file) - - -from signal import signal, SIGINT -from sys import exit - - -class SigIntMixin(object): - """Registers a signal handler that calls finish on SIGINT""" - - def __init__(self, *args, **kwargs): - super(SigIntMixin, self).__init__(*args, **kwargs) - signal(SIGINT, self._sigint_handler) - - def _sigint_handler(self, signum, frame): - self.finish() - exit(0) diff --git a/src/rez/vendor/progress/spinner.py b/src/rez/vendor/progress/spinner.py index 4e100cabb..d593a203e 100644 --- a/src/rez/vendor/progress/spinner.py +++ b/src/rez/vendor/progress/spinner.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright (c) 2012 Giorgos Verigakis +# Copyright (c) 2012 Georgios Verigakis # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above @@ -24,7 +24,9 @@ class Spinner(Infinite): def update(self): i = self.index % len(self.phases) - self.write(self.phases[i]) + message = self.message % self + line = ''.join([message, self.phases[i]]) + self.writeln(line) class PieSpinner(Spinner): diff --git a/src/rez/vendor/six/LICENSE b/src/rez/vendor/six/LICENSE index 8c7bca3aa..1cc22a5aa 100644 --- a/src/rez/vendor/six/LICENSE +++ b/src/rez/vendor/six/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2010-2014 Benjamin Peterson +Copyright (c) 2010-2024 Benjamin Peterson Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in @@ -15,4 +15,4 @@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
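Editor's note: the `Counter`/`Spinner` updates in this range all build their line as `self.message % self`. That works because %-formatting with named keys only needs `__getitem__` on the right-hand operand, which `Infinite.__getitem__` (shown earlier in this diff) provides. Sketch:

```python
class Stats:
    def __getitem__(self, key):
        # %-formatting with %(name)s keys calls __getitem__ per key.
        return {"index": 3, "max": 10}[key]

print("Loading %(index)d/%(max)d " % Stats())  # -> "Loading 3/10 "
```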
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/rez/vendor/six/README.rst b/src/rez/vendor/six/README.rst new file mode 100644 index 000000000..3674bc956 --- /dev/null +++ b/src/rez/vendor/six/README.rst @@ -0,0 +1,25 @@ +.. image:: https://img.shields.io/pypi/v/six.svg + :target: https://pypi.org/project/six/ + :alt: six on PyPI + +.. image:: https://readthedocs.org/projects/six/badge/?version=latest + :target: https://six.readthedocs.io/ + :alt: six's documentation on Read the Docs + +.. image:: https://img.shields.io/badge/license-MIT-green.svg + :target: https://github.com/benjaminp/six/blob/master/LICENSE + :alt: MIT License badge + +Six is a Python 2 and 3 compatibility library. It provides utility functions +for smoothing over the differences between the Python versions with the goal of +writing Python code that is compatible on both Python versions. See the +documentation for more information on what is provided. + +Six supports Python 2.7 and 3.3+. It is contained in only one Python +file, so it can be easily copied into your project. (The copyright and license +notice must be retained.) + +Online documentation is at https://six.readthedocs.io/. + +Bugs can be reported to https://github.com/benjaminp/six. The code can also +be found there. diff --git a/src/rez/vendor/six/six.py b/src/rez/vendor/six/six.py index 89b2188fd..3de5969b1 100644 --- a/src/rez/vendor/six/six.py +++ b/src/rez/vendor/six/six.py @@ -1,4 +1,4 @@ -# Copyright (c) 2010-2018 Benjamin Peterson +# Copyright (c) 2010-2024 Benjamin Peterson # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -29,7 +29,7 @@ import types __author__ = "Benjamin Peterson " -__version__ = "1.12.0" +__version__ = "1.17.0" # Useful for very coarse version differentiation. 
@@ -71,6 +71,11 @@ def __len__(self): MAXSIZE = int((1 << 63) - 1) del X +if PY34: + from importlib.util import spec_from_loader +else: + spec_from_loader = None + def _add_doc(func, doc): """Add documentation to a function.""" @@ -186,6 +191,11 @@ def find_module(self, fullname, path=None): return self return None + def find_spec(self, fullname, path, target=None): + if fullname in self.known_modules: + return spec_from_loader(fullname, self) + return None + def __get_module(self, fullname): try: return self.known_modules[fullname] @@ -223,6 +233,12 @@ def get_code(self, fullname): return None get_source = get_code # same as get_code + def create_module(self, spec): + return self.load_module(spec.name) + + def exec_module(self, module): + pass + _importer = _SixMetaPathImporter(__name__) @@ -247,7 +263,7 @@ class _MovedItems(_LazyModule): MovedAttribute("reduce", "__builtin__", "functools"), MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserDict", "UserDict", "collections", "IterableUserDict", "UserDict"), MovedAttribute("UserList", "UserList", "collections"), MovedAttribute("UserString", "UserString", "collections"), MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), @@ -255,9 +271,11 @@ class _MovedItems(_LazyModule): MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), MovedModule("builtins", "__builtin__"), MovedModule("configparser", "ConfigParser"), + MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"), MovedModule("copyreg", "copy_reg"), MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"), MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), MovedModule("http_cookies", "Cookie", "http.cookies"), MovedModule("html_entities", "htmlentitydefs", "html.entities"), @@ -417,12 +435,17 @@ class Module_six_moves_urllib_request(_LazyModule): MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), MovedAttribute("urlretrieve", "urllib", "urllib.request"), MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), MovedAttribute("proxy_bypass", "urllib", "urllib.request"), MovedAttribute("parse_http_list", "urllib2", "urllib.request"), MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), ] +if sys.version_info[:2] < (3, 14): + _urllib_request_moved_attributes.extend( + [ + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + ] + ) for attr in _urllib_request_moved_attributes: setattr(Module_six_moves_urllib_request, attr.name, attr) del attr @@ -637,13 +660,16 @@ def u(s): import io StringIO = io.StringIO BytesIO = io.BytesIO + del io _assertCountEqual = "assertCountEqual" if sys.version_info[1] <= 1: _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" + _assertNotRegex = "assertNotRegexpMatches" else: _assertRaisesRegex = "assertRaisesRegex" _assertRegex = "assertRegex" + _assertNotRegex = "assertNotRegex" else: def b(s): return s @@ -665,6 +691,7 @@ def indexbytes(buf, i): _assertCountEqual = 
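Editor's note: the `find_spec`/`create_module`/`exec_module` additions above move six's importer onto the PEP 451 protocol that replaced `find_module` (removed in Python 3.12). A self-contained finder using the same `spec_from_loader` shape (`AliasFinder` and `my_alias` are hypothetical):

```python
import sys
import types
from importlib.util import spec_from_loader

class AliasFinder:
    def __init__(self, aliases):
        self.aliases = aliases  # name -> prebuilt module object

    def find_spec(self, fullname, path=None, target=None):
        if fullname in self.aliases:
            return spec_from_loader(fullname, self)
        return None

    def create_module(self, spec):
        return self.aliases[spec.name]  # hand back the prebuilt module

    def exec_module(self, module):
        pass  # already fully populated

mod = types.ModuleType("my_alias")
mod.answer = 42
sys.meta_path.append(AliasFinder({"my_alias": mod}))

import my_alias
print(my_alias.answer)  # 42
```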
"assertItemsEqual" _assertRaisesRegex = "assertRaisesRegexp" _assertRegex = "assertRegexpMatches" + _assertNotRegex = "assertNotRegexpMatches" _add_doc(b, """Byte literal""") _add_doc(u, """Text literal""") @@ -681,6 +708,10 @@ def assertRegex(self, *args, **kwargs): return getattr(self, _assertRegex)(*args, **kwargs) +def assertNotRegex(self, *args, **kwargs): + return getattr(self, _assertNotRegex)(*args, **kwargs) + + if PY3: exec_ = getattr(moves.builtins, "exec") @@ -716,16 +747,7 @@ def exec_(_code_, _globs_=None, _locs_=None): """) -if sys.version_info[:2] == (3, 2): - exec_("""def raise_from(value, from_value): - try: - if from_value is None: - raise value - raise value from from_value - finally: - value = None -""") -elif sys.version_info[:2] > (3, 2): +if sys.version_info[:2] > (3,): exec_("""def raise_from(value, from_value): try: raise value from from_value @@ -805,13 +827,33 @@ def print_(*args, **kwargs): _add_doc(reraise, """Reraise an exception.""") if sys.version_info[0:2] < (3, 4): + # This does exactly the same what the :func:`py3:functools.update_wrapper` + # function does on Python versions after 3.2. It sets the ``__wrapped__`` + # attribute on ``wrapper`` object and it doesn't raise an error if any of + # the attributes mentioned in ``assigned`` and ``updated`` are missing on + # ``wrapped`` object. + def _update_wrapper(wrapper, wrapped, + assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + for attr in assigned: + try: + value = getattr(wrapped, attr) + except AttributeError: + continue + else: + setattr(wrapper, attr, value) + for attr in updated: + getattr(wrapper, attr).update(getattr(wrapped, attr, {})) + wrapper.__wrapped__ = wrapped + return wrapper + _update_wrapper.__doc__ = functools.update_wrapper.__doc__ + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, updated=functools.WRAPPER_UPDATES): - def wrapper(f): - f = functools.wraps(wrapped, assigned, updated)(f) - f.__wrapped__ = wrapped - return f - return wrapper + return functools.partial(_update_wrapper, wrapped=wrapped, + assigned=assigned, updated=updated) + wraps.__doc__ = functools.wraps.__doc__ + else: wraps = functools.wraps @@ -824,7 +866,15 @@ def with_metaclass(meta, *bases): class metaclass(type): def __new__(cls, name, this_bases, d): - return meta(name, bases, d) + if sys.version_info[:2] >= (3, 7): + # This version introduced PEP 560 that requires a bit + # of extra care (we mimic what is done by __build_class__). + resolved_bases = types.resolve_bases(bases) + if resolved_bases is not bases: + d['__orig_bases__'] = bases + else: + resolved_bases = bases + return meta(name, resolved_bases, d) @classmethod def __prepare__(cls, name, this_bases): @@ -861,12 +911,11 @@ def ensure_binary(s, encoding='utf-8', errors='strict'): - `str` -> encoded to `bytes` - `bytes` -> `bytes` """ + if isinstance(s, binary_type): + return s if isinstance(s, text_type): return s.encode(encoding, errors) - elif isinstance(s, binary_type): - return s - else: - raise TypeError("not expecting type '%s'" % type(s)) + raise TypeError("not expecting type '%s'" % type(s)) def ensure_str(s, encoding='utf-8', errors='strict'): @@ -880,12 +929,15 @@ def ensure_str(s, encoding='utf-8', errors='strict'): - `str` -> `str` - `bytes` -> decoded to `str` """ - if not isinstance(s, (text_type, binary_type)): - raise TypeError("not expecting type '%s'" % type(s)) + # Optimization: Fast return for the common case. 
+ if type(s) is str: + return s if PY2 and isinstance(s, text_type): - s = s.encode(encoding, errors) + return s.encode(encoding, errors) elif PY3 and isinstance(s, binary_type): - s = s.decode(encoding, errors) + return s.decode(encoding, errors) + elif not isinstance(s, (text_type, binary_type)): + raise TypeError("not expecting type '%s'" % type(s)) return s @@ -908,10 +960,9 @@ def ensure_text(s, encoding='utf-8', errors='strict'): raise TypeError("not expecting type '%s'" % type(s)) - def python_2_unicode_compatible(klass): """ - A decorator that defines __unicode__ and __str__ methods under Python 2. + A class decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method diff --git a/src/rez/vendor/yaml/LICENSE b/src/rez/vendor/yaml/LICENSE index 050ced23f..2f1b8e15e 100644 --- a/src/rez/vendor/yaml/LICENSE +++ b/src/rez/vendor/yaml/LICENSE @@ -1,4 +1,5 @@ -Copyright (c) 2006 Kirill Simonov +Copyright (c) 2017-2021 Ingy döt Net +Copyright (c) 2006-2016 Kirill Simonov Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/src/rez/vendor/yaml/__init__.py b/src/rez/vendor/yaml/__init__.py index 5df0bb5fd..824936194 100644 --- a/src/rez/vendor/yaml/__init__.py +++ b/src/rez/vendor/yaml/__init__.py @@ -8,7 +8,7 @@ from .loader import * from .dumper import * -__version__ = '5.1' +__version__ = '6.0.1' try: from .cyaml import * __with_libyaml__ = True @@ -18,41 +18,12 @@ import io #------------------------------------------------------------------------------ -# Warnings control +# XXX "Warnings control" is now deprecated. Leaving in the API function to not +# break code that uses it. #------------------------------------------------------------------------------ - -# 'Global' warnings state: -_warnings_enabled = { - 'YAMLLoadWarning': True, -} - -# Get or set global warnings' state def warnings(settings=None): if settings is None: - return _warnings_enabled - - if type(settings) is dict: - for key in settings: - if key in _warnings_enabled: - _warnings_enabled[key] = settings[key] - -# Warn when load() is called without Loader=... -class YAMLLoadWarning(RuntimeWarning): - pass - -def load_warning(method): - if _warnings_enabled['YAMLLoadWarning'] is False: - return - - import warnings - - message = ( - "calling yaml.%s() without Loader=... is deprecated, as the " - "default Loader is unsafe. Please read " - "https://msg.pyyaml.org/load for full details." - ) % method - - warnings.warn(message, YAMLLoadWarning, stacklevel=3) + return {} #------------------------------------------------------------------------------ def scan(stream, Loader=Loader): @@ -100,30 +71,22 @@ def compose_all(stream, Loader=Loader): finally: loader.dispose() -def load(stream, Loader=None): +def load(stream, Loader): """ Parse the first YAML document in a stream and produce the corresponding Python object. """ - if Loader is None: - load_warning('load') - Loader = FullLoader - loader = Loader(stream) try: return loader.get_single_data() finally: loader.dispose() -def load_all(stream, Loader=None): +def load_all(stream, Loader): """ Parse all YAML documents in a stream and produce corresponding Python objects. 
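Editor's note: with the defaulting shim removed above, `yaml.load()` now requires an explicit `Loader`, matching PyYAML 6. Callers migrate like so (assuming a PyYAML-6-compatible `yaml` module):

```python
import yaml

doc = "name: rez\nversion: 1.0\n"

data = yaml.load(doc, Loader=yaml.SafeLoader)  # Loader is now mandatory
print(data)                                    # {'name': 'rez', 'version': 1.0}

print(yaml.safe_load(doc))                     # equivalent shorthand
```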
""" - if Loader is None: - load_warning('load_all') - Loader = FullLoader - loader = Loader(stream) try: while loader.check_data(): @@ -306,42 +269,62 @@ def safe_dump(data, stream=None, **kwds): return dump_all([data], stream, Dumper=SafeDumper, **kwds) def add_implicit_resolver(tag, regexp, first=None, - Loader=Loader, Dumper=Dumper): + Loader=None, Dumper=Dumper): """ Add an implicit scalar detector. If an implicit scalar value matches the given regexp, the corresponding tag is assigned to the scalar. first is a sequence of possible initial characters or None. """ - Loader.add_implicit_resolver(tag, regexp, first) + if Loader is None: + loader.Loader.add_implicit_resolver(tag, regexp, first) + loader.FullLoader.add_implicit_resolver(tag, regexp, first) + loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first) + else: + Loader.add_implicit_resolver(tag, regexp, first) Dumper.add_implicit_resolver(tag, regexp, first) -def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper): +def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper): """ Add a path based resolver for the given tag. A path is a list of keys that forms a path to a node in the representation tree. Keys can be string values, integers, or None. """ - Loader.add_path_resolver(tag, path, kind) + if Loader is None: + loader.Loader.add_path_resolver(tag, path, kind) + loader.FullLoader.add_path_resolver(tag, path, kind) + loader.UnsafeLoader.add_path_resolver(tag, path, kind) + else: + Loader.add_path_resolver(tag, path, kind) Dumper.add_path_resolver(tag, path, kind) -def add_constructor(tag, constructor, Loader=Loader): +def add_constructor(tag, constructor, Loader=None): """ Add a constructor for the given tag. Constructor is a function that accepts a Loader instance and a node object and produces the corresponding Python object. """ - Loader.add_constructor(tag, constructor) + if Loader is None: + loader.Loader.add_constructor(tag, constructor) + loader.FullLoader.add_constructor(tag, constructor) + loader.UnsafeLoader.add_constructor(tag, constructor) + else: + Loader.add_constructor(tag, constructor) -def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader): +def add_multi_constructor(tag_prefix, multi_constructor, Loader=None): """ Add a multi-constructor for the given tag prefix. Multi-constructor is called for a node if its tag starts with tag_prefix. Multi-constructor accepts a Loader instance, a tag suffix, and a node object and produces the corresponding Python object. 
""" - Loader.add_multi_constructor(tag_prefix, multi_constructor) + if Loader is None: + loader.Loader.add_multi_constructor(tag_prefix, multi_constructor) + loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor) + loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor) + else: + Loader.add_multi_constructor(tag_prefix, multi_constructor) def add_representer(data_type, representer, Dumper=Dumper): """ @@ -368,7 +351,12 @@ class YAMLObjectMetaclass(type): def __init__(cls, name, bases, kwds): super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds) if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None: - cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + if isinstance(cls.yaml_loader, list): + for loader in cls.yaml_loader: + loader.add_constructor(cls.yaml_tag, cls.from_yaml) + else: + cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml) + cls.yaml_dumper.add_representer(cls, cls.to_yaml) class YAMLObject(metaclass=YAMLObjectMetaclass): @@ -379,7 +367,7 @@ class YAMLObject(metaclass=YAMLObjectMetaclass): __slots__ = () # no direct instantiation, so allow immutable subclasses - yaml_loader = Loader + yaml_loader = [Loader, FullLoader, UnsafeLoader] yaml_dumper = Dumper yaml_tag = None diff --git a/src/rez/vendor/yaml/constructor.py b/src/rez/vendor/yaml/constructor.py index 34fc1ae92..619acd307 100644 --- a/src/rez/vendor/yaml/constructor.py +++ b/src/rez/vendor/yaml/constructor.py @@ -31,6 +31,14 @@ def check_data(self): # If there are more documents available? return self.check_node() + def check_state_key(self, key): + """Block special attributes/methods from being set in a newly created + object, to prevent user-controlled methods from being called during + deserialization""" + if self.get_state_keys_blacklist_regexp().match(key): + raise ConstructorError(None, None, + "blacklisted key '%s' in instance state found" % (key,), None) + def get_data(self): # Construct and return the next document. 
diff --git a/src/rez/vendor/yaml/constructor.py b/src/rez/vendor/yaml/constructor.py
index 34fc1ae92..619acd307 100644
--- a/src/rez/vendor/yaml/constructor.py
+++ b/src/rez/vendor/yaml/constructor.py
@@ -31,6 +31,14 @@ def check_data(self):
         # If there are more documents available?
         return self.check_node()
 
+    def check_state_key(self, key):
+        """Block special attributes/methods from being set in a newly created
+        object, to prevent user-controlled methods from being called during
+        deserialization"""
+        if self.get_state_keys_blacklist_regexp().match(key):
+            raise ConstructorError(None, None,
+                "blacklisted key '%s' in instance state found" % (key,), None)
+
     def get_data(self):
         # Construct and return the next document.
         if self.check_node():
@@ -72,7 +80,7 @@ def construct_object(self, node, deep=False):
             constructor = self.yaml_constructors[node.tag]
         else:
             for tag_prefix in self.yaml_multi_constructors:
-                if node.tag.startswith(tag_prefix):
+                if tag_prefix is not None and node.tag.startswith(tag_prefix):
                     tag_suffix = node.tag[len(tag_prefix):]
                     constructor = self.yaml_multi_constructors[tag_prefix]
                     break
@@ -324,22 +332,23 @@ def construct_yaml_timestamp(self, node):
         minute = int(values['minute'])
         second = int(values['second'])
         fraction = 0
+        tzinfo = None
         if values['fraction']:
             fraction = values['fraction'][:6]
             while len(fraction) < 6:
                 fraction += '0'
             fraction = int(fraction)
-        delta = None
         if values['tz_sign']:
             tz_hour = int(values['tz_hour'])
             tz_minute = int(values['tz_minute'] or 0)
             delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
             if values['tz_sign'] == '-':
                 delta = -delta
-        data = datetime.datetime(year, month, day, hour, minute, second, fraction)
-        if delta:
-            data -= delta
-        return data
+            tzinfo = datetime.timezone(delta)
+        elif values['tz']:
+            tzinfo = datetime.timezone.utc
+        return datetime.datetime(year, month, day, hour, minute, second, fraction,
+                                 tzinfo=tzinfo)
 
     def construct_yaml_omap(self, node):
         # Note: we do not check for duplicate keys, because it's too
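The timestamp change is behavioural: a YAML timestamp with an offset now keeps its wall-clock time and carries tzinfo, where previously the offset was subtracted to produce a naive UTC datetime. A minimal sketch:

    import datetime
    import yaml

    ts = yaml.safe_load("2001-12-14 21:59:43.10 -5")

    assert ts.hour == 21                                    # wall-clock time preserved
    assert ts.utcoffset() == datetime.timedelta(hours=-5)   # offset kept as tzinfo
    # The old code instead returned a naive 2001-12-15 02:59:43.10.
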
@@ -471,6 +480,16 @@ def construct_undefined(self, node):
     SafeConstructor.construct_undefined)
 
 class FullConstructor(SafeConstructor):
+    # 'extend' is blacklisted because it is used by
+    # construct_python_object_apply to add `listitems` to a newly generate
+    # python instance
+    def get_state_keys_blacklist(self):
+        return ['^extend$', '^__.*__$']
+
+    def get_state_keys_blacklist_regexp(self):
+        if not hasattr(self, 'state_keys_blacklist_regexp'):
+            self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')')
+        return self.state_keys_blacklist_regexp
 
     def construct_python_str(self, node):
         return self.construct_scalar(node)
@@ -513,7 +532,7 @@ def find_python_module(self, name, mark, unsafe=False):
         except ImportError as exc:
             raise ConstructorError("while constructing a Python module", mark,
                     "cannot find module %r (%s)" % (name, exc), mark)
-        if not name in sys.modules:
+        if name not in sys.modules:
             raise ConstructorError("while constructing a Python module", mark,
                     "module %r is not imported" % name, mark)
         return sys.modules[name]
@@ -533,7 +552,7 @@ def find_python_name(self, name, mark, unsafe=False):
         except ImportError as exc:
             raise ConstructorError("while constructing a Python object", mark,
                     "cannot find module %r (%s)" % (module_name, exc), mark)
-        if not module_name in sys.modules:
+        if module_name not in sys.modules:
             raise ConstructorError("while constructing a Python object", mark,
                     "module %r is not imported" % module_name, mark)
         module = sys.modules[module_name]
@@ -573,7 +592,7 @@ def make_python_instance(self, suffix, node,
             else:
                 return cls(*args, **kwds)
 
-    def set_python_instance_state(self, instance, state):
+    def set_python_instance_state(self, instance, state, unsafe=False):
         if hasattr(instance, '__setstate__'):
             instance.__setstate__(state)
         else:
@@ -581,11 +600,16 @@ def set_python_instance_state(self, instance, state):
             slotstate = {}
             if isinstance(state, tuple) and len(state) == 2:
                 state, slotstate = state
             if hasattr(instance, '__dict__'):
+                if not unsafe and state:
+                    for key in state.keys():
+                        self.check_state_key(key)
                 instance.__dict__.update(state)
             elif state:
                 slotstate.update(state)
             for key, value in slotstate.items():
-                setattr(object, key, value)
+                if not unsafe:
+                    self.check_state_key(key)
+                setattr(instance, key, value)
 
     def construct_python_object(self, suffix, node):
         # Format:
@@ -686,22 +710,6 @@ def construct_python_object_new(self, suffix, node):
     'tag:yaml.org,2002:python/name:',
     FullConstructor.construct_python_name)
 
-FullConstructor.add_multi_constructor(
-    'tag:yaml.org,2002:python/module:',
-    FullConstructor.construct_python_module)
-
-FullConstructor.add_multi_constructor(
-    'tag:yaml.org,2002:python/object:',
-    FullConstructor.construct_python_object)
-
-FullConstructor.add_multi_constructor(
-    'tag:yaml.org,2002:python/object/apply:',
-    FullConstructor.construct_python_object_apply)
-
-FullConstructor.add_multi_constructor(
-    'tag:yaml.org,2002:python/object/new:',
-    FullConstructor.construct_python_object_new)
-
 class UnsafeConstructor(FullConstructor):
 
     def find_python_module(self, name, mark):
@@ -714,6 +722,26 @@ def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False)
         return super(UnsafeConstructor, self).make_python_instance(
             suffix, node, args, kwds, newobj, unsafe=True)
 
+    def set_python_instance_state(self, instance, state):
+        return super(UnsafeConstructor, self).set_python_instance_state(
+            instance, state, unsafe=True)
+
+UnsafeConstructor.add_multi_constructor(
+    'tag:yaml.org,2002:python/module:',
+    UnsafeConstructor.construct_python_module)
+
+UnsafeConstructor.add_multi_constructor(
+    'tag:yaml.org,2002:python/object:',
+    UnsafeConstructor.construct_python_object)
+
+UnsafeConstructor.add_multi_constructor(
+    'tag:yaml.org,2002:python/object/new:',
+    UnsafeConstructor.construct_python_object_new)
+
+UnsafeConstructor.add_multi_constructor(
+    'tag:yaml.org,2002:python/object/apply:',
+    UnsafeConstructor.construct_python_object_apply)
+
 # Constructor is same as UnsafeConstructor. Need to leave this in place in case
 # people have extended it directly.
 class Constructor(UnsafeConstructor):
diff --git a/src/rez/vendor/yaml/cyaml.py b/src/rez/vendor/yaml/cyaml.py
index 1e606c74b..0c2134587 100644
--- a/src/rez/vendor/yaml/cyaml.py
+++ b/src/rez/vendor/yaml/cyaml.py
@@ -4,7 +4,7 @@
     'CBaseDumper', 'CSafeDumper', 'CDumper'
 ]
 
-from _yaml import CParser, CEmitter
+from yaml._yaml import CParser, CEmitter
 
 from .constructor import *
 
diff --git a/src/rez/vendor/yaml/loader.py b/src/rez/vendor/yaml/loader.py
index 414cb2c15..e90c11224 100644
--- a/src/rez/vendor/yaml/loader.py
+++ b/src/rez/vendor/yaml/loader.py
@@ -51,7 +51,7 @@ def __init__(self, stream):
 # UnsafeLoader is the same as Loader (which is and was always unsafe on
 # untrusted input). Use of either Loader or UnsafeLoader should be rare, since
 # FullLoad should be able to load almost all YAML safely. Loader is left intact
-# to ensure backwards compatability.
+# to ensure backwards compatibility.
 
 class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
 
     def __init__(self, stream):
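The net effect of moving the python/object* multi-constructors onto UnsafeConstructor is that full_load now refuses to instantiate arbitrary Python objects, and instance state is routed through the key blacklist unless unsafe=True. A minimal sketch of the visible difference:

    import yaml

    payload = "!!python/object/apply:os.getcwd []"

    try:
        yaml.full_load(payload)        # FullLoader no longer knows this tag
    except yaml.constructor.ConstructorError as exc:
        print("rejected:", exc.problem)

    print(yaml.unsafe_load(payload))   # UnsafeLoader still constructs it
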
diff --git a/src/rez/vendor/yaml/representer.py b/src/rez/vendor/yaml/representer.py
index dd144017b..808ca06df 100644
--- a/src/rez/vendor/yaml/representer.py
+++ b/src/rez/vendor/yaml/representer.py
@@ -5,7 +5,7 @@
 from .error import *
 from .nodes import *
 
-import datetime, sys, copyreg, types, base64, collections
+import datetime, copyreg, types, base64, collections
 
 class RepresenterError(YAMLError):
     pass
@@ -369,7 +369,7 @@ def represent_ordered_dict(self, data):
 Representer.add_representer(tuple,
         Representer.represent_tuple)
 
-Representer.add_representer(type,
+Representer.add_multi_representer(type,
         Representer.represent_name)
 
 Representer.add_representer(collections.OrderedDict,
diff --git a/src/rez/vendor/yaml/resolver.py b/src/rez/vendor/yaml/resolver.py
index 02b82e73e..3522bdaaf 100644
--- a/src/rez/vendor/yaml/resolver.py
+++ b/src/rez/vendor/yaml/resolver.py
@@ -146,8 +146,8 @@ def resolve(self, kind, value, implicit):
             resolvers = self.yaml_implicit_resolvers.get('', [])
         else:
             resolvers = self.yaml_implicit_resolvers.get(value[0], [])
-        resolvers += self.yaml_implicit_resolvers.get(None, [])
-        for tag, regexp in resolvers:
+        wildcard_resolvers = self.yaml_implicit_resolvers.get(None, [])
+        for tag, regexp in resolvers + wildcard_resolvers:
             if regexp.match(value):
                 return tag
         implicit = implicit[1]
@@ -177,7 +177,7 @@ class Resolver(BaseResolver):
 Resolver.add_implicit_resolver(
         'tag:yaml.org,2002:float',
         re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
-                    |\.[0-9_]+(?:[eE][-+][0-9]+)?
+                    |\.[0-9][0-9_]*(?:[eE][-+][0-9]+)?
                     |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
                     |[-+]?\.(?:inf|Inf|INF)
                     |\.(?:nan|NaN|NAN))$''', re.X),
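The tightened branch of the float pattern requires a digit immediately after the leading dot, so a stray token like ._ no longer resolves to the float tag (where it previously crashed the float constructor); genuine leading-dot floats are unaffected. A minimal sketch:

    import yaml

    assert yaml.safe_load("x: .5") == {"x": 0.5}    # still an implicit float
    assert yaml.safe_load("x: ._") == {"x": "._"}   # now falls through to a plain string
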
diff --git a/src/rez/vendor/yaml/scanner.py b/src/rez/vendor/yaml/scanner.py
index 775dbcc6d..de925b07f 100644
--- a/src/rez/vendor/yaml/scanner.py
+++ b/src/rez/vendor/yaml/scanner.py
@@ -332,7 +332,7 @@ def unwind_indent(self, column):
         ## }
         #if self.flow_level and self.indent > column:
         #    raise ScannerError(None, None,
-        #            "invalid intendation or unclosed '[' or '{'",
+        #            "invalid indentation or unclosed '[' or '{'",
         #            self.get_mark())
 
         # In the flow context, indentation is ignored. We make the scanner less
@@ -370,7 +370,7 @@ def fetch_stream_start(self):
 
     def fetch_stream_end(self):
 
-        # Set the current intendation to -1.
+        # Set the current indentation to -1.
         self.unwind_indent(-1)
 
         # Reset simple keys.
@@ -389,7 +389,7 @@ def fetch_stream_end(self):
 
     def fetch_directive(self):
 
-        # Set the current intendation to -1.
+        # Set the current indentation to -1.
         self.unwind_indent(-1)
 
         # Reset simple keys.
@@ -407,7 +407,7 @@ def fetch_document_end(self):
 
     def fetch_document_indicator(self, TokenClass):
 
-        # Set the current intendation to -1.
+        # Set the current indentation to -1.
         self.unwind_indent(-1)
 
         # Reset simple keys. Note that there could not be a block collection
@@ -1211,7 +1211,7 @@ def scan_flow_scalar_non_spaces(self, double, start_mark):
                 for k in range(length):
                     if self.peek(k) not in '0123456789ABCDEFabcdef':
                         raise ScannerError("while scanning a double-quoted scalar", start_mark,
-                                "expected escape sequence of %d hexdecimal numbers, but found %r" %
+                                "expected escape sequence of %d hexadecimal numbers, but found %r" %
                                 (length, self.peek(k)), self.get_mark())
                 code = int(self.prefix(length), 16)
                 chunks.append(chr(code))
@@ -1403,7 +1403,7 @@ def scan_uri_escapes(self, name, start_mark):
         for k in range(2):
             if self.peek(k) not in '0123456789ABCDEFabcdef':
                 raise ScannerError("while scanning a %s" % name, start_mark,
-                        "expected URI escape sequence of 2 hexdecimal numbers, but found %r"
+                        "expected URI escape sequence of 2 hexadecimal numbers, but found %r"
                        % self.peek(k), self.get_mark())
             codes.append(int(self.prefix(2), 16))
         self.forward(2)