Update vendored windows libs

This commit is contained in:
Labrys of Knossos 2022-11-28 05:59:32 -05:00
parent f61c211655
commit b1cefa94e5
226 changed files with 33472 additions and 11882 deletions

View file

@ -0,0 +1,27 @@
# Copyright 2014-2016 Nathan West
#
# This file is part of autocommand.
#
# autocommand is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# autocommand is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with autocommand. If not, see <http://www.gnu.org/licenses/>.
# flake8 flags all these imports as unused, hence the NOQAs everywhere.
from .automain import automain # NOQA
from .autoparse import autoparse, smart_open # NOQA
from .autocommand import autocommand # NOQA
try:
from .autoasync import autoasync # NOQA
except ImportError: # pragma: no cover
pass

View file

@ -0,0 +1,142 @@
# Copyright 2014-2015 Nathan West
#
# This file is part of autocommand.
#
# autocommand is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# autocommand is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with autocommand. If not, see <http://www.gnu.org/licenses/>.
from asyncio import get_event_loop, iscoroutine
from functools import wraps
from inspect import signature
async def _run_forever_coro(coro, args, kwargs, loop):
'''
This helper function launches an async main function that was tagged with
forever=True. There are two possibilities:
- The function is a normal function, which handles initializing the event
loop, which is then run forever
- The function is a coroutine, which needs to be scheduled in the event
loop, which is then run forever
- There is also the possibility that the function is a normal function
wrapping a coroutine function
The function is therefore called unconditionally and scheduled in the event
loop if the return value is a coroutine object.
The reason this is a separate function is to make absolutely sure that all
the objects created are garbage collected after all is said and done; we
do this to ensure that any exceptions raised in the tasks are collected
ASAP.
'''
# Personal note: I consider this an antipattern, as it relies on the use of
# unowned resources. The setup function dumps some stuff into the event
# loop where it just whirls in the ether without a well defined owner or
# lifetime. For this reason, there's a good chance I'll remove the
# forever=True feature from autoasync at some point in the future.
thing = coro(*args, **kwargs)
if iscoroutine(thing):
await thing
def autoasync(coro=None, *, loop=None, forever=False, pass_loop=False):
    '''
    Convert an asyncio coroutine into a function which, when called, is
    evaluated in an event loop, and the return value returned. This is
    intended to make it easy to write entry points into asyncio coroutines,
    which otherwise need to be explicitly evaluated with an event loop's
    run_until_complete.

    If `loop` is given, it is used as the event loop to run the coro in. If it
    is None (the default), the loop is retrieved using asyncio.get_event_loop.
    This call is deferred until the decorated function is called, so that
    callers can install custom event loops or event loop policies after
    @autoasync is applied.

    If `forever` is True, the loop is run forever after the decorated coroutine
    is finished. Use this for servers created with asyncio.start_server and the
    like.

    If `pass_loop` is True, the event loop object is passed into the coroutine
    as the `loop` kwarg when the wrapper function is called. In this case, the
    wrapper function's __signature__ is updated to remove this parameter, so
    that autoparse can still be used on it without generating a parameter for
    `loop`.

    This coroutine can be called with ( @autoasync(...) ) or without
    ( @autoasync ) arguments.

    Examples:

    @autoasync
    def get_file(host, port):
        reader, writer = yield from asyncio.open_connection(host, port)
        data = reader.read()
        sys.stdout.write(data.decode())

    get_file(host, port)

    @autoasync(forever=True, pass_loop=True)
    def server(host, port, loop):
        yield from loop.create_server(Proto, host, port)

    server('localhost', 8899)
    '''
    # Bare-decorator support: @autoasync passes the function as `coro`;
    # @autoasync(...) passes coro=None, so return a one-shot decorator that
    # closes over the keyword options.
    if coro is None:
        return lambda c: autoasync(
            c, loop=loop,
            forever=forever,
            pass_loop=pass_loop)

    # The old and new signatures are required to correctly bind the loop
    # parameter in 100% of cases, even if it's a positional parameter.
    # NOTE: A future release will probably require the loop parameter to be
    # a kwonly parameter.
    if pass_loop:
        old_sig = signature(coro)
        new_sig = old_sig.replace(parameters=(
            param for name, param in old_sig.parameters.items()
            if name != "loop"))

    @wraps(coro)
    def autoasync_wrapper(*args, **kwargs):
        # Defer the call to get_event_loop so that, if a custom policy is
        # installed after the autoasync decorator, it is respected at call time
        local_loop = get_event_loop() if loop is None else loop

        # Inject the 'loop' argument. We have to use this signature binding to
        # ensure it's injected in the correct place (positional, keyword, etc)
        if pass_loop:
            bound_args = old_sig.bind_partial()
            bound_args.arguments.update(
                loop=local_loop,
                **new_sig.bind(*args, **kwargs).arguments)
            args, kwargs = bound_args.args, bound_args.kwargs

        if forever:
            # Schedule the main coroutine, then spin the loop until it is
            # stopped externally; nothing is returned in this mode.
            local_loop.create_task(_run_forever_coro(
                coro, args, kwargs, local_loop
            ))
            local_loop.run_forever()
        else:
            return local_loop.run_until_complete(coro(*args, **kwargs))

    # Attach the updated signature. This allows 'pass_loop' to be used with
    # autoparse
    if pass_loop:
        autoasync_wrapper.__signature__ = new_sig

    return autoasync_wrapper

View file

@ -0,0 +1,70 @@
# Copyright 2014-2015 Nathan West
#
# This file is part of autocommand.
#
# autocommand is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# autocommand is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with autocommand. If not, see <http://www.gnu.org/licenses/>.
from .autoparse import autoparse
from .automain import automain
try:
from .autoasync import autoasync
except ImportError: # pragma: no cover
pass
def autocommand(
        module, *,
        description=None,
        epilog=None,
        add_nos=False,
        parser=None,
        loop=None,
        forever=False,
        pass_loop=False):
    '''
    Compose autoasync (optionally), autoparse, and automain into a single
    decorator. `module` is the calling module's __name__ (or True to run
    unconditionally); the remaining keyword options are forwarded to the
    underlying decorators.
    '''
    # Guard against @autocommand being applied directly to a function
    # instead of being called with a module name first.
    if callable(module):
        raise TypeError('autocommand requires a module name argument')

    wants_async = loop is not None or forever or pass_loop

    def autocommand_decorator(func):
        # Step 1: if requested, run it all in an asyncio event loop.
        # autoasync patches the __signature__ of the decorated function so
        # that, when pass_loop is True, the `loop` parameter of the original
        # function is *not* interpreted as a command-line argument by
        # autoparse.
        wrapped = func
        if wants_async:
            wrapped = autoasync(
                wrapped,
                loop=None if loop is True else loop,
                pass_loop=pass_loop,
                forever=forever)

        # Step 2: create the parser. Done second so that arguments are
        # parsed and passed *before* entering the asyncio event loop, which
        # simplifies tracebacks and ensures parse/binding errors are raised
        # even when `forever` is True.
        wrapped = autoparse(
            wrapped,
            description=description,
            epilog=epilog,
            add_nos=add_nos,
            parser=parser)

        # Step 3: call the function automatically if __name__ == '__main__'
        # (or if True was provided).
        return automain(module)(wrapped)

    return autocommand_decorator

View file

@ -0,0 +1,59 @@
# Copyright 2014-2015 Nathan West
#
# This file is part of autocommand.
#
# autocommand is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# autocommand is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with autocommand. If not, see <http://www.gnu.org/licenses/>.
import sys
from .errors import AutocommandError
class AutomainRequiresModuleError(AutocommandError, TypeError):
    # Raised when @automain is applied directly to a function instead of
    # being called with a module name first. Also a TypeError, since the
    # decorator was called with the wrong kind of argument.
    pass
def automain(module, *, args=(), kwargs=None):
    '''
    Invoke the decorated function automatically when the module is being run
    as "__main__" (or when `module` is True, unconditionally), calling it
    with the optional `args`/`kwargs` and passing its return value to
    sys.exit. Otherwise the function is returned unchanged.

    Usage:

    @automain(__name__)  # Pass __name__ to check __name__=="__main__"
    def main():
        ...
    '''
    # Check that @automain(...) was called, rather than @automain
    if callable(module):
        raise AutomainRequiresModuleError(module)

    if module != '__main__' and module is not True:
        # Not the entry-point module: hand the function back untouched.
        return lambda main: main

    call_kwargs = {} if kwargs is None else kwargs

    # A named function rather than a lambda, for a neater traceback.
    def automain_decorator(main):
        sys.exit(main(*args, **call_kwargs))

    return automain_decorator

View file

@ -0,0 +1,333 @@
# Copyright 2014-2015 Nathan West
#
# This file is part of autocommand.
#
# autocommand is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# autocommand is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with autocommand. If not, see <http://www.gnu.org/licenses/>.
import sys
from re import compile as compile_regex
from inspect import signature, getdoc, Parameter
from argparse import ArgumentParser
from contextlib import contextmanager
from functools import wraps
from io import IOBase
from autocommand.errors import AutocommandError
# Sentinel meaning "no annotation / no default was supplied".
_empty = Parameter.empty


class AnnotationError(AutocommandError):
    '''Annotation error: annotation must be a string, type, or tuple of both'''


class PositionalArgError(AutocommandError):
    '''
    Positional Arg Error: autocommand can't handle positional-only parameters
    '''


class KWArgError(AutocommandError):
    '''kwarg Error: autocommand can't handle a **kwargs parameter'''


class DocstringError(AutocommandError):
    '''Docstring error'''


class TooManySplitsError(DocstringError):
    '''
    The docstring had too many ---- section splits. Currently we only support
    using up to a single split, to split the docstring into description and
    epilog parts.
    '''
def _get_type_description(annotation):
    '''
    Convert a parameter annotation into a (type, description) pair.

    Accepted annotation forms:
    - absent (inspect.Parameter.empty): neither type nor description
    - a callable: the type
    - a str: the description
    - a 2-tuple mixing one callable and one str, in either order

    Anything else raises AnnotationError. If you provide an annotation that
    is somehow both a string and a callable, the behavior is undefined.
    '''
    if annotation is _empty:
        return None, None
    if callable(annotation):
        return annotation, None
    if isinstance(annotation, str):
        return None, annotation
    if isinstance(annotation, tuple):
        try:
            first, second = annotation
        except ValueError as e:
            # Wrong-arity tuple
            raise AnnotationError(annotation) from e
        if callable(first) and isinstance(second, str):
            return first, second
        if isinstance(first, str) and callable(second):
            return second, first
    raise AnnotationError(annotation)
def _add_arguments(param, parser, used_char_args, add_nos):
    '''
    Add the argument(s) to an ArgumentParser (using add_argument) for a given
    parameter. used_char_args is the set of -short options currently already in
    use, and is updated (if necessary) by this function. If add_nos is True,
    this will also add an inverse switch for all boolean options. For
    instance, for the boolean parameter "verbose", this will create --verbose
    and --no-verbose.
    '''
    # Impl note: This function is kept separate from make_parser because it's
    # already very long and I wanted to separate out as much as possible into
    # its own call scope, to prevent even the possibility of subtle mutation
    # bugs.
    if param.kind is param.POSITIONAL_ONLY:
        raise PositionalArgError(param)
    elif param.kind is param.VAR_KEYWORD:
        raise KWArgError(param)

    # These are the kwargs for the add_argument function.
    arg_spec = {}
    is_option = False

    # Get the type and default from the annotation.
    arg_type, description = _get_type_description(param.annotation)

    # Get the default value
    default = param.default

    # If there is no explicit type, and the default is present and not None,
    # infer the type from the default.
    if arg_type is None and default not in {_empty, None}:
        arg_type = type(default)

    # Add default. The presence of a default means this is an option, not an
    # argument.
    if default is not _empty:
        arg_spec['default'] = default
        is_option = True

    # Add the type
    if arg_type is not None:
        # Special case for bool: make it just a --switch
        if arg_type is bool:
            # A falsy/absent default means the switch stores True; a truthy
            # default means the switch stores False (inverting it).
            if not default or default is _empty:
                arg_spec['action'] = 'store_true'
            else:
                arg_spec['action'] = 'store_false'

            # Switches are always options
            is_option = True

        # Special case for file types: make it a string type, for filename
        elif isinstance(default, IOBase):
            arg_spec['type'] = str

        # TODO: special case for list type.
        #   - How to specify type of list members?
        #       - param: [int]
        #       - param: int =[]
        #   - action='append' vs nargs='*'

        else:
            arg_spec['type'] = arg_type

    # nargs: if the signature includes *args, collect them as trailing CLI
    # arguments in a list. *args can't have a default value, so it can never be
    # an option.
    if param.kind is param.VAR_POSITIONAL:
        # TODO: consider depluralizing metavar/name here.
        arg_spec['nargs'] = '*'

    # Add description.
    if description is not None:
        arg_spec['help'] = description

    # Get the --flags
    flags = []
    name = param.name

    if is_option:
        # Add the first letter as a -short option. Try the letter's other
        # case before giving up.
        for letter in name[0], name[0].swapcase():
            if letter not in used_char_args:
                used_char_args.add(letter)
                flags.append('-{}'.format(letter))
                break

        # If the parameter is a --long option, or is a -short option that
        # somehow failed to get a flag, add it.
        if len(name) > 1 or not flags:
            flags.append('--{}'.format(name))
            arg_spec['dest'] = name
    else:
        flags.append(name)

    parser.add_argument(*flags, **arg_spec)

    # Create the --no- version for boolean switches
    if add_nos and arg_type is bool:
        parser.add_argument(
            '--no-{}'.format(name),
            action='store_const',
            dest=name,
            const=default if default is not _empty else False)
def make_parser(func_sig, description, epilog, add_nos):
    '''
    Build an ArgumentParser from a function signature.
    '''
    parser = ArgumentParser(description=description, epilog=epilog)

    # -h is always taken by argparse's built-in help flag.
    used_char_args = {'h'}

    # Put single-character parameters first so they aren't forced to take
    # --long spellings. sorted() is stable, so the parameters otherwise keep
    # their relative order.
    short_first = sorted(
        func_sig.parameters.values(),
        key=lambda parameter: len(parameter.name) > 1)

    for parameter in short_first:
        _add_arguments(parameter, parser, used_char_args, add_nos)

    return parser
# A separator line of four or more dashes splits description from epilog.
_DOCSTRING_SPLIT = compile_regex(r'\n\s*-{4,}\s*\n')


def parse_docstring(docstring):
    '''
    Split a docstring into a (description, epilog) pair on a ---- separator.
    A missing docstring yields two empty strings; a docstring with no
    separator becomes the description with an empty epilog.
    '''
    if docstring is None:
        return '', ''

    pieces = _DOCSTRING_SPLIT.split(docstring)

    if len(pieces) == 1:
        return docstring, ''
    if len(pieces) == 2:
        return pieces[0], pieces[1]
    raise TooManySplitsError()
def autoparse(
        func=None, *,
        description=None,
        epilog=None,
        add_nos=False,
        parser=None):
    '''
    This decorator converts a function that takes normal arguments into a
    function which takes a single optional argument, argv, parses it using an
    argparse.ArgumentParser, and calls the underlying function with the parsed
    arguments. If it is not given, sys.argv[1:] is used. This is so that the
    function can be used as a setuptools entry point, as well as a normal main
    function. sys.argv[1:] is not evaluated until the function is called, to
    allow injecting different arguments for testing.

    It uses the argument signature of the function to create an
    ArgumentParser. Parameters without defaults become positional parameters,
    while parameters *with* defaults become --options. Use annotations to set
    the type of the parameter.

    The `description` and `epilog` parameters correspond to the same respective
    argparse parameters. If no description is given, it defaults to the
    decorated function's docstring, if present.

    If add_nos is True, every boolean option (that is, every parameter with a
    default of True/False or a type of bool) will have a --no- version created
    as well, which inverts the option. For instance, the --verbose option will
    have a --no-verbose counterpart. These are not mutually exclusive-
    whichever one appears last in the argument list will have precedence.

    If a parser is given, it is used instead of one generated from the function
    signature. In this case, no parser is created; instead, the given parser is
    used to parse the argv argument. The parser's results' argument names must
    match up with the parameter names of the decorated function.

    The decorated function is attached to the result as the `func` attribute,
    and the parser is attached as the `parser` attribute.
    '''
    # If @autoparse(...) is used instead of @autoparse
    if func is None:
        return lambda f: autoparse(
            f, description=description,
            epilog=epilog,
            add_nos=add_nos,
            parser=parser)

    func_sig = signature(func)

    # The docstring may be split by a ---- separator into description and
    # epilog halves; explicit arguments take precedence over either half.
    docstr_description, docstr_epilog = parse_docstring(getdoc(func))

    if parser is None:
        parser = make_parser(
            func_sig,
            description or docstr_description,
            epilog or docstr_epilog,
            add_nos)

    @wraps(func)
    def autoparse_wrapper(argv=None):
        # Read sys.argv lazily, at call time, so tests can inject argv.
        if argv is None:
            argv = sys.argv[1:]

        # Get empty argument binding, to fill with parsed arguments. This
        # object does all the heavy lifting of turning named arguments into
        # correctly bound *args and **kwargs.
        parsed_args = func_sig.bind_partial()
        parsed_args.arguments.update(vars(parser.parse_args(argv)))

        return func(*parsed_args.args, **parsed_args.kwargs)

    # TODO: attach an updated __signature__ to autoparse_wrapper, just in case.
    # Attach the wrapped function and parser, and return the wrapper.
    autoparse_wrapper.func = func
    autoparse_wrapper.parser = parser
    return autoparse_wrapper
@contextmanager
def smart_open(filename_or_file, *args, **kwargs):
    '''
    Open *filename_or_file* if it is an openable name, else pass it through.

    str, bytes, and int values are handed to open() along with *args and
    **kwargs, and the resulting file object is sent to the context and closed
    when the context exits, just like "with open(filename) as f:". Any other
    object (an already-open file such as sys.stdout, for example) is yielded
    unchanged and deliberately left unclosed at the end of the context.

    Example:

    def work_with_file(name=sys.stdout):
        with smart_open(name) as f:
            # Works correctly if name is a str filename or sys.stdout
            print("Some stuff", file=f)
            # If it was a filename, f is closed at the end here.
    '''
    openable = isinstance(filename_or_file, (str, bytes, int))
    if not openable:
        yield filename_or_file
    else:
        with open(filename_or_file, *args, **kwargs) as file_obj:
            yield file_obj

View file

@ -0,0 +1,23 @@
# Copyright 2014-2016 Nathan West
#
# This file is part of autocommand.
#
# autocommand is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# autocommand is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with autocommand. If not, see <http://www.gnu.org/licenses/>.
class AutocommandError(Exception):
    """Root of the autocommand exception hierarchy.

    Individual modules define subclasses for errors specific to that module.
    """

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,39 @@
import ctypes
from jaraco.windows import environ
import os
# Windows-only: bind getenv/_putenv straight from the Microsoft C runtime so
# the CRT's own view of the environment can be compared with the Win32 and
# os.environ views below.
getenv = ctypes.cdll.msvcrt.getenv
getenv.restype = ctypes.c_char_p
putenv = ctypes.cdll.msvcrt._putenv
def do_putenv(*pair):
    # CRT-style _putenv takes a single "NAME=value" string rather than a
    # (name, value) pair, so join before calling.
    # NOTE(review): with no argtypes declared, ctypes passes a Python 3 str
    # as a wide string; the narrow _putenv presumably expects bytes --
    # confirm on Python 3.
    return putenv("=".join(pair))
def print_environment_variable(key):
    """
    Print the repr() of *key* as seen by each of several environment APIs:
    os.environ, os.getenv, the Win32 GetEnvironmentVariable, and the C
    runtime's getenv. When a lookup raises, the exception is printed in
    place of the value, followed by a space instead of a newline.
    """
    for method in (os.environ.get, os.getenv, environ.GetEnvironmentVariable, getenv):
        try:
            print(repr(method(key)))
        except Exception as e:
            print(e, end=' ')
    # Bug fix: the original ended with a bare ``print`` expression, which is
    # a no-op in Python 3 (a Python 2 leftover) -- it must be *called* to
    # emit the separating blank line.
    print()
def do_test():
    # Set the same variable through four different write APIs, dumping what
    # every read API sees after each round, to show which writes propagate
    # to which environment views.
    key = 'TEST_PYTHON_ENVIRONMENT'
    print_environment_variable(key)
    methods = (
        os.environ.__setitem__,
        os.putenv,
        environ.SetEnvironmentVariable,
        do_putenv,
    )
    for i, method in enumerate(methods):
        print('round', i)
        method(key, 'value when using method %d' % i)
        print_environment_variable(key)
# Script entry point: run the cross-API environment-variable experiment.
if __name__ == '__main__':
    do_test()

View file

@ -0,0 +1,69 @@
import os
def findpath(target, start=os.path.curdir):
    r"""
    Find a path from start to target where target is relative to start.

    >>> orig_wd = os.getcwd()
    >>> os.chdir('c:\\windows') # so we know what the working directory is

    >>> findpath('d:\\')
    'd:\\'
    >>> findpath('d:\\', 'c:\\windows')
    'd:\\'
    >>> findpath('\\bar', 'd:\\')
    'd:\\bar'
    >>> findpath('\\bar', 'd:\\foo') # fails with '\\bar'
    'd:\\bar'
    >>> findpath('bar', 'd:\\foo')
    'd:\\foo\\bar'
    >>> findpath('bar\\baz', 'd:\\foo')
    'd:\\foo\\bar\\baz'
    >>> findpath('\\baz', 'd:\\foo\\bar') # fails with '\\baz'
    'd:\\baz'

    Since we're on the C drive, findpath may be allowed to return
    relative paths for targets on the same drive. I use abspath to
    confirm that the ultimate target is what we expect.
    >>> os.path.abspath(findpath('\\bar'))
    'c:\\bar'
    >>> os.path.abspath(findpath('bar'))
    'c:\\windows\\bar'

    >>> findpath('..', 'd:\\foo\\bar')
    'd:\\foo'
    >>> findpath('..\\bar', 'd:\\foo')
    'd:\\bar'

    The parent of the root directory is the root directory.
    >>> findpath('..', 'd:\\')
    'd:\\'

    restore the original working directory
    >>> os.chdir(orig_wd)
    """
    # Join first, then let normpath collapse '..' and redundant separators.
    joined = os.path.join(start, target)
    return os.path.normpath(joined)
def main():
    """Run findpath on the CLI arguments, or the doctests when none given."""
    import sys
    cli_args = sys.argv[1:]
    if not cli_args:
        import doctest
        doctest.testmod()
    else:
        print(findpath(*cli_args))
# Script entry point.
if __name__ == '__main__':
    main()

View file

@ -0,0 +1,21 @@
from ctypes import CDLL, c_char_p
def get_libc():
    """
    Load and return the first available C runtime as a ctypes.CDLL, trying
    the Windows CRT (msvcrt) and then glibc (libc.so.6).

    :raises RuntimeError: when no candidate library can be loaded.
    """
    libnames = ('msvcrt', 'libc.so.6')
    for libname in libnames:
        try:
            return CDLL(libname)
        except OSError:
            # Bug fix: the original also listed ``except WindowsError``, but
            # WindowsError is only defined on Windows builds of Python, so
            # merely evaluating that clause raised NameError on other
            # platforms when CDLL('msvcrt') failed. On Windows,
            # WindowsError is an alias of OSError, so this single clause
            # covers both platforms with identical behavior.
            pass
    raise RuntimeError("Unable to find a suitable libc (tried %s)" % libnames)
# Bind libc's getenv and declare its return type so ctypes converts the
# result to a Python bytes object instead of a raw pointer.
getenv = get_libc().getenv
getenv.restype = c_char_p
# call into your linked module here
# NOTE(review): under Python 3 ctypes, the argument should presumably be
# bytes (b'FOO'); passing str sends a wide string -- confirm on Python 3.
print('new value is', getenv('FOO'))

View file

@ -0,0 +1,29 @@
import os
import sys
try:
    from jaraco.windows.filesystem import symlink
except ImportError:
    # a dirty reimplementation of symlink from jaraco.windows
    from ctypes import windll
    from ctypes.wintypes import LPWSTR, DWORD, BOOLEAN

    # Win32 CreateSymbolicLinkW(lpSymlinkFileName, lpTargetFileName, dwFlags)
    CreateSymbolicLink = windll.kernel32.CreateSymbolicLinkW
    CreateSymbolicLink.argtypes = (LPWSTR, LPWSTR, DWORD)
    CreateSymbolicLink.restype = BOOLEAN

    def symlink(link, target, target_is_directory=False):
        """
        An implementation of os.symlink for Windows (Vista and greater)
        """
        # Directory links need a non-zero dwFlags; the bool is passed
        # directly as the DWORD flags value.
        target_is_directory = target_is_directory or os.path.isdir(target)
        # NOTE(review): the BOOLEAN result is discarded, so failures (e.g.
        # insufficient privilege) pass silently -- confirm that's intended.
        CreateSymbolicLink(link, target, target_is_directory)
# This experiment only makes sense on Windows.
assert sys.platform in ('win32',)

# Create a directory, symlink it, and check how the stdlib reports the link.
os.makedirs(r'.\foo')
assert os.path.isdir(r'.\foo')
symlink(r'.\foo_sym', r'.\foo')
assert os.path.isdir(r'.\foo_sym')
# Demonstrates the bug being reported: os.path.islink does not recognize
# the just-created symlink (per the original author's annotation below).
assert os.path.islink(r'.\foo_sym')  # fails

View file

@ -0,0 +1,20 @@
# reported at http://social.msdn.microsoft.com/Forums/en-US/wsk/thread/f43c2faf-3df3-4f11-9f5e-1a9101753f93
from win32wnet import WNetAddConnection2, NETRESOURCE
# Reproduce WNetAddConnection2 behavior when connecting twice to the same
# share with an explicit user name (see the MSDN forum thread linked above).
resource = NETRESOURCE()
resource.lpRemoteName = r'\\aoshi\users'
username = 'jaraco'

# The first connection succeeds (returns None).
res = WNetAddConnection2(resource, UserName=username)
print('first result is', res)
# The second, identical connection raises error 1219 instead of being a
# no-op -- the behavior this script was written to demonstrate.
res = WNetAddConnection2(resource, UserName=username)
print('second result is', res)
"""
Output is:
first result is None
Traceback (most recent call last):
File ".\wnetaddconnection2-error-on-64-bit.py", line 7, in <module>
res = WNetAddConnection2(resource, UserName=username)
pywintypes.error: (1219, 'WNetAddConnection2', 'Multiple connections to a server or shared resource by the same user, using more than one user name, are not allowed. Disconnect all previous connections to the server or shared resource and try again.')
"""

View file

@ -1,17 +0,0 @@
from .api import distribution, Distribution, PackageNotFoundError # noqa: F401
from .api import metadata, entry_points, resolve, version, read_text
# Import for installation side-effects.
from . import _hooks # noqa: F401
# The public API re-exported from .api.
__all__ = [
    'metadata',
    'entry_points',
    'resolve',
    'version',
    'read_text',
    ]

# Dog-food the library to compute its own version.
__version__ = version(__name__)

View file

@ -1,148 +0,0 @@
from __future__ import unicode_literals, absolute_import
import re
import sys
import itertools
from .api import Distribution
from zipfile import ZipFile
if sys.version_info >= (3,): # pragma: nocover
from contextlib import suppress
from pathlib import Path
else: # pragma: nocover
from contextlib2 import suppress # noqa
from itertools import imap as map # type: ignore
from pathlib2 import Path
FileNotFoundError = IOError, OSError
__metaclass__ = type
def install(cls):
    """Class decorator: register *cls* on sys.meta_path and return it."""
    sys.meta_path.append(cls)
    return cls
class NullFinder:
    """
    A meta_path finder that never finds anything.

    Subclasses sit on sys.meta_path only for their find_distribution()
    side channel; the import-system entry points always report "not found".
    """

    @staticmethod
    def find_spec(*args, **kwargs):
        return None

    # Python 2's import protocol calls find_module(), while Python 3
    # prefers find_spec(). For a finder that never finds anything the two
    # behave identically, so alias one to the other.
    find_module = find_spec
@install
class MetadataPathFinder(NullFinder):
    """A degenerate finder for distribution packages on the file system.

    This finder supplies only a find_distribution() method for versions
    of Python that do not have a PathFinder find_distribution().
    """
    # Matches metadata dir names like 'foo-1.0.dist-info' or 'foo.egg-info'.
    search_template = r'{name}(-.*)?\.(dist|egg)-info'

    @classmethod
    def find_distribution(cls, name):
        # First matching metadata directory wins; None when the package is
        # not found on sys.path.
        paths = cls._search_paths(name)
        dists = map(PathDistribution, paths)
        return next(dists, None)

    @classmethod
    def _search_paths(cls, name):
        """
        Find metadata directories in sys.path heuristically.
        """
        return itertools.chain.from_iterable(
            cls._search_path(path, name)
            for path in map(Path, sys.path)
            )

    @classmethod
    def _search_path(cls, root, name):
        # Non-directory sys.path entries (zip files, nonexistent paths)
        # contribute nothing.
        if not root.is_dir():
            return ()
        # Dashes in a project name appear as underscores in the metadata
        # directory name, so normalize before matching (cf. NEWS 0.7).
        normalized = name.replace('-', '_')
        return (
            item
            for item in root.iterdir()
            if item.is_dir()
            and re.match(
                cls.search_template.format(name=normalized),
                str(item.name),
                flags=re.IGNORECASE,
                )
            )
class PathDistribution(Distribution):
    def __init__(self, path):
        """Construct a distribution from a path to the metadata directory."""
        self._path = path

    def read_text(self, filename):
        # A missing metadata file is reported as None rather than raised.
        # (FileNotFoundError is a compat alias for (IOError, OSError) on
        # Python 2 -- see the module header.)
        with suppress(FileNotFoundError):
            with self._path.joinpath(filename).open(encoding='utf-8') as fp:
                return fp.read()
        return None
    read_text.__doc__ = Distribution.read_text.__doc__
@install
class WheelMetadataFinder(NullFinder):
    """A degenerate finder for distribution packages in wheels.

    This finder supplies only a find_distribution() method for versions
    of Python that do not have a PathFinder find_distribution().
    """
    # Matches a wheel archive name for the package, e.g. 'foo-1.0-....whl'.
    search_template = r'{name}(-.*)?\.whl'

    @classmethod
    def find_distribution(cls, name):
        # First matching .whl entry on sys.path wins; None when no wheel
        # for this name is present.
        paths = cls._search_paths(name)
        dists = map(WheelDistribution, paths)
        return next(dists, None)

    @classmethod
    def _search_paths(cls, name):
        # sys.path entries that are themselves wheel files (directly
        # importable wheels) are the candidates here.
        return (
            item
            for item in map(Path, sys.path)
            if re.match(
                cls.search_template.format(name=name),
                str(item.name),
                flags=re.IGNORECASE,
                )
            )
class WheelDistribution(Distribution):
    def __init__(self, archive):
        # Wheel filenames are '{name}-{version}-{tags...}.whl'; the
        # .dist-info directory inside the archive is named from the first
        # two dash-separated fields.
        self._archive = archive
        name, version = archive.name.split('-')[0:2]
        self._dist_info = '{}-{}.dist-info'.format(name, version)

    def read_text(self, filename):
        with ZipFile(_path_to_filename(self._archive)) as zf:
            # A missing archive member means the metadata file does not
            # exist; report that as None.
            with suppress(KeyError):
                as_bytes = zf.read('{}/{}'.format(self._dist_info, filename))
                return as_bytes.decode('utf-8')
        return None
    read_text.__doc__ = Distribution.read_text.__doc__
def _path_to_filename(path): # pragma: nocover
"""
On non-compliant systems, ensure a path-like object is
a string.
"""
try:
return path.__fspath__()
except AttributeError:
return str(path)

View file

@ -1,146 +0,0 @@
import io
import abc
import sys
import email
from importlib import import_module
if sys.version_info > (3,): # pragma: nocover
from configparser import ConfigParser
else: # pragma: nocover
from ConfigParser import SafeConfigParser as ConfigParser
try:
BaseClass = ModuleNotFoundError
except NameError: # pragma: nocover
BaseClass = ImportError # type: ignore
__metaclass__ = type
# Subclasses ModuleNotFoundError where available (see the BaseClass shim
# above), so callers may catch either this class or ImportError.
class PackageNotFoundError(BaseClass):
    """The package was not found."""
class Distribution:
    """A Python distribution package."""

    @abc.abstractmethod
    def read_text(self, filename):
        """Attempt to load metadata file given by the name.

        :param filename: The name of the file in the distribution info.
        :return: The text if found, otherwise None.
        """

    @classmethod
    def from_name(cls, name):
        """Return the Distribution for the given package name.

        :param name: The name of the distribution package to search for.
        :return: The Distribution instance (or subclass thereof) for the named
            package, if found.
        :raises PackageNotFoundError: When the named package's distribution
            metadata cannot be found.
        """
        for resolver in cls._discover_resolvers():
            resolved = resolver(name)
            if resolved is not None:
                return resolved
        # for/else: every resolver was tried and none produced a result.
        else:
            raise PackageNotFoundError(name)

    @staticmethod
    def _discover_resolvers():
        """Search the meta_path for resolvers."""
        # Finders without a find_distribution attribute yield None here and
        # are dropped by the filter.
        declared = (
            getattr(finder, 'find_distribution', None)
            for finder in sys.meta_path
            )
        return filter(None, declared)

    @property
    def metadata(self):
        """Return the parsed metadata for this Distribution.

        The returned object will have keys that name the various bits of
        metadata. See PEP 566 for details.
        """
        # Wheel-style installs ship METADATA; older sdist-style installs
        # ship PKG-INFO.
        return email.message_from_string(
            self.read_text('METADATA') or self.read_text('PKG-INFO')
            )

    @property
    def version(self):
        """Return the 'Version' metadata for the distribution package."""
        return self.metadata['Version']
def distribution(package):
    """Get the ``Distribution`` instance for the given package.

    :param package: The name of the package as a string.
    :return: A ``Distribution`` instance (or subclass thereof).
    :raises PackageNotFoundError: When the package's metadata is not found.
    """
    return Distribution.from_name(package)
def metadata(package):
    """Get the metadata for the package.

    :param package: The name of the distribution package to query.
    :return: An email.Message containing the parsed metadata.
    :raises PackageNotFoundError: When the package's metadata is not found.
    """
    return Distribution.from_name(package).metadata
def version(package):
    """Get the version string for the named package.

    :param package: The name of the distribution package to query.
    :return: The version string for the package as defined in the package's
        "Version" metadata key.
    """
    dist = distribution(package)
    return dist.version
def entry_points(name):
    """Return the entry points for the named distribution package.

    :param name: The name of the distribution package to query.
    :return: A ConfigParser instance where the sections and keys are taken
        from the entry_points.txt ini-style contents.
    """
    contents = read_text(name, 'entry_points.txt')
    # 2018-09-10(barry): Should we provide any options here, or let the caller
    # send options to the underlying ConfigParser?   For now, YAGNI.
    parser = ConfigParser()
    if hasattr(parser, 'read_string'):
        parser.read_string(contents)
    else:  # pragma: nocover
        # Python 2 has no read_string
        parser.readfp(io.StringIO(contents))
    return parser
def resolve(entry_point):
    """Resolve an entry point string into the named callable.

    :param entry_point: An entry point string of the form
        `path.to.module:callable`.
    :return: The actual callable object `path.to.module.callable`
    :raises ValueError: When `entry_point` doesn't have the proper format.
    """
    # Without a colon there is no module/attribute split to perform.
    if ':' not in entry_point:
        raise ValueError('Not an entry point: {}'.format(entry_point))
    path, _, name = entry_point.rpartition(':')
    return getattr(import_module(path), name)
def read_text(package, filename):
    """
    Read the text of the file in the distribution info directory.
    """
    dist = distribution(package)
    return dist.read_text(filename)

View file

@ -1,57 +0,0 @@
=========================
importlib_metadata NEWS
=========================
0.7 (2018-11-27)
================
* Fixed issue where packages with dashes in their names would
not be discovered. Closes #21.
* Distribution lookup is now case-insensitive. Closes #20.
* Wheel distributions can no longer be discovered by their module
name. Like Path distributions, they must be indicated by their
distribution package name.
0.6 (2018-10-07)
================
* Removed ``importlib_metadata.distribution`` function. Now
the public interface is primarily the utility functions exposed
in ``importlib_metadata.__all__``. Closes #14.
* Added two new utility functions ``read_text`` and
``metadata``.
0.5 (2018-09-18)
================
* Updated README and removed details about Distribution
class, now considered private. Closes #15.
* Added test suite support for Python 3.4+.
* Fixed SyntaxErrors on Python 3.4 and 3.5. !12
* Fixed errors on Windows joining Path elements. !15
0.4 (2018-09-14)
================
* Housekeeping.
0.3 (2018-09-14)
================
* Added usage documentation. Closes #8
* Add support for getting metadata from wheels on ``sys.path``. Closes #9
0.2 (2018-09-11)
================
* Added ``importlib_metadata.entry_points()``. Closes #1
* Added ``importlib_metadata.resolve()``. Closes #12
* Add support for Python 2.7. Closes #4
0.1 (2018-09-10)
================
* Initial release.
..
Local Variables:
mode: change-log-mode
indent-tabs-mode: nil
sentence-end-double-space: t
fill-column: 78
coding: utf-8
End:

View file

@ -1,180 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# flake8: noqa
#
# importlib_metadata documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 30 10:21:00 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.doctest',
              'sphinx.ext.intersphinx',
              'sphinx.ext.coverage',
              'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'importlib_metadata'
copyright = '2017-2018, Jason Coombs, Barry Warsaw'
author = 'Jason Coombs, Barry Warsaw'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE(review): 0.1 looks stale relative to the package's version file (0.7)
# -- confirm before publishing docs.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# NOTE(review): the comment above mentions alabaster but html_theme is
# 'default' -- presumably copied from the sphinx-quickstart template.
html_sidebars = {
    '**': [
        'relations.html',  # needs 'show_related': True theme option to display
        'searchbox.html',
    ]
}

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'importlib_metadatadoc'
# -- Options for LaTeX output ---------------------------------------------

# Every entry below is commented out, so the dict is empty and Sphinx
# defaults apply throughout.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    # Use the shared 'author' setting; the previous hard-coded value
    # ('Brett Cannon, Barry Warsaw') disagreed with the 'author' variable
    # used by man_pages and texinfo_documents below.
    (master_doc, 'importlib_metadata.tex',
     'importlib\\_metadata Documentation', author, 'manual'),
]
# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'importlib_metadata', 'importlib_metadata Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): 'One line description of project.' is still the
# sphinx-quickstart placeholder text.
texinfo_documents = [
    (master_doc, 'importlib_metadata', 'importlib_metadata Documentation',
     author, 'importlib_metadata', 'One line description of project.',
     'Miscellaneous'),
]

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
    'python': ('https://docs.python.org/3', None),
}

View file

@ -1,53 +0,0 @@
===============================
Welcome to importlib_metadata
===============================
``importlib_metadata`` is a library which provides an API for accessing an
installed package's `metadata`_, such as its entry points or its top-level
name. This functionality intends to replace most uses of ``pkg_resources``
`entry point API`_ and `metadata API`_. Along with ``importlib.resources`` in
`Python 3.7 and newer`_ (backported as `importlib_resources`_ for older
versions of Python), this can eliminate the need to use the older and less
efficient ``pkg_resources`` package.
``importlib_metadata`` is a backport of Python 3.8's standard library
`importlib.metadata`_ module for Python 2.7, and 3.4 through 3.7. Users of
Python 3.8 and beyond are encouraged to use the standard library module, and
in fact for these versions, ``importlib_metadata`` just shadows that module.
Developers looking for detailed API descriptions should refer to the Python
3.8 standard library documentation.
The documentation here includes a general :ref:`usage <using>` guide.
.. toctree::
:maxdepth: 2
:caption: Contents:
using.rst
changelog.rst
Project details
===============
* Project home: https://gitlab.com/python-devs/importlib_metadata
* Report bugs at: https://gitlab.com/python-devs/importlib_metadata/issues
* Code hosting: https://gitlab.com/python-devs/importlib_metadata.git
* Documentation: http://importlib_metadata.readthedocs.io/
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
.. _`metadata`: https://www.python.org/dev/peps/pep-0566/
.. _`entry point API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#entry-points
.. _`metadata API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#metadata-api
.. _`Python 3.7 and newer`: https://docs.python.org/3/library/importlib.html#module-importlib.resources
.. _`importlib_resources`: https://importlib-resources.readthedocs.io/en/latest/index.html
.. _`importlib.metadata`: TBD

View file

@ -1,133 +0,0 @@
.. _using:
==========================
Using importlib_metadata
==========================
``importlib_metadata`` is a library that provides for access to installed
package metadata. Built in part on Python's import system, this library
intends to replace similar functionality in ``pkg_resources`` `entry point
API`_ and `metadata API`_. Along with ``importlib.resources`` in `Python 3.7
and newer`_ (backported as `importlib_resources`_ for older versions of
Python), this can eliminate the need to use the older and less efficient
``pkg_resources`` package.
By "installed package" we generally mean a third party package installed into
Python's ``site-packages`` directory via tools such as ``pip``. Specifically,
it means a package with either a discoverable ``dist-info`` or ``egg-info``
directory, and metadata defined by `PEP 566`_ or its older specifications.
By default, package metadata can live on the file system or in wheels on
``sys.path``. Through an extension mechanism, the metadata can live almost
anywhere.
Overview
========
Let's say you wanted to get the version string for a package you've installed
using ``pip``. We start by creating a virtual environment and installing
something into it::
$ python3 -m venv example
$ source example/bin/activate
(example) $ pip install importlib_metadata
(example) $ pip install wheel
You can get the version string for ``wheel`` by running the following::
(example) $ python
>>> from importlib_metadata import version
>>> version('wheel')
'0.31.1'
You can also get the set of entry points for the ``wheel`` package. Since the
``entry_points.txt`` file is an ``.ini``-style, the ``entry_points()``
function returns a `ConfigParser instance`_. To get the list of command line
entry points, extract the ``console_scripts`` section::
>>> cp = entry_points('wheel')
>>> cp.options('console_scripts')
['wheel']
You can also get the callable that the entry point is mapped to::
>>> cp.get('console_scripts', 'wheel')
'wheel.tool:main'
Even more conveniently, you can resolve this entry point to the actual
callable::
>>> from importlib_metadata import resolve
>>> ep = cp.get('console_scripts', 'wheel')
>>> resolve(ep)
<function main at 0x111b91bf8>
Distributions
=============
While the above API is the most common and convenient usage, you can get all
of that information from the ``Distribution`` class. A ``Distribution`` is an
abstract object that represents the metadata for a Python package. You can
get the ``Distribution`` instance::
>>> from importlib_metadata import distribution
>>> dist = distribution('wheel')
Thus, an alternative way to get the version number is through the
``Distribution`` instance::
>>> dist.version
'0.31.1'
There are all kinds of additional metadata available on the ``Distribution``
instance::
    >>> dist.metadata['Requires-Python']
    '>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*'
    >>> dist.metadata['License']
    'MIT'
The full set of available metadata is not described here. See PEP 566 for
additional details.
Extending the search algorithm
==============================
Because package metadata is not available through ``sys.path`` searches, or
package loaders directly, the metadata for a package is found through import
system `finders`_. To find a distribution package's metadata,
``importlib_metadata`` queries the list of `meta path finders`_ on
`sys.meta_path`_.
By default ``importlib_metadata`` installs a finder for packages found on the
file system.  This finder doesn't actually find any *packages*, but it can
find the package's metadata.
The abstract class :py:class:`importlib.abc.MetaPathFinder` defines the
interface expected of finders by Python's import system.
``importlib_metadata`` extends this protocol by looking for an optional
``find_distribution()`` ``@classmethod`` on the finders from
``sys.meta_path``. If the finder has this method, it takes a single argument
which is the name of the distribution package to find. The method returns
``None`` if it cannot find the distribution package, otherwise it returns an
instance of the ``Distribution`` abstract class.
What this means in practice is that to support finding distribution package
metadata in locations other than the file system, you should derive from
``Distribution`` and implement the ``load_metadata()`` method. This takes a
single argument which is the name of the package whose metadata is being
found. This instance of the ``Distribution`` base abstract class is what your
finder's ``find_distribution()`` method should return.
.. _`entry point API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#entry-points
.. _`metadata API`: https://setuptools.readthedocs.io/en/latest/pkg_resources.html#metadata-api
.. _`Python 3.7 and newer`: https://docs.python.org/3/library/importlib.html#module-importlib.resources
.. _`importlib_resources`: https://importlib-resources.readthedocs.io/en/latest/index.html
.. _`PEP 566`: https://www.python.org/dev/peps/pep-0566/
.. _`ConfigParser instance`: https://docs.python.org/3/library/configparser.html#configparser.ConfigParser
.. _`finders`: https://docs.python.org/3/reference/import.html#finders-and-loaders
.. _`meta path finders`: https://docs.python.org/3/glossary.html#term-meta-path-finder
.. _`sys.meta_path`: https://docs.python.org/3/library/sys.html#sys.meta_path

View file

@ -1,44 +0,0 @@
import re
import unittest
import importlib_metadata
class APITests(unittest.TestCase):
    """Exercise the module-level convenience API of importlib_metadata.

    These tests assume 'pip' and 'importlib_metadata' itself are
    installed in the test environment.
    """

    # Loose X.Y[.Z] match; intentionally not a full PEP 440 parse.
    version_pattern = r'\d+\.\d+(\.\d)?'

    def test_retrieves_version_of_self(self):
        # version() returns a plain str matching the version pattern.
        version = importlib_metadata.version('importlib_metadata')
        assert isinstance(version, str)
        assert re.match(self.version_pattern, version)

    def test_retrieves_version_of_pip(self):
        # Assume pip is installed and retrieve the version of pip.
        version = importlib_metadata.version('pip')
        assert isinstance(version, str)
        assert re.match(self.version_pattern, version)

    def test_for_name_does_not_exist(self):
        # Unknown distribution names raise PackageNotFoundError.
        with self.assertRaises(importlib_metadata.PackageNotFoundError):
            importlib_metadata.distribution('does-not-exist')

    def test_for_top_level(self):
        distribution = importlib_metadata.distribution('importlib_metadata')
        self.assertEqual(
            distribution.read_text('top_level.txt').strip(),
            'importlib_metadata')

    def test_entry_points(self):
        parser = importlib_metadata.entry_points('pip')
        # We should probably not be dependent on a third party package's
        # internal API staying stable.
        entry_point = parser.get('console_scripts', 'pip')
        self.assertEqual(entry_point, 'pip._internal:main')

    def test_metadata_for_this_package(self):
        md = importlib_metadata.metadata('importlib_metadata')
        assert md['author'] == 'Barry Warsaw'
        assert md['LICENSE'] == 'Apache Software License'
        assert md['Name'] == 'importlib-metadata'
        classifiers = md.get_all('Classifier')
        assert 'Topic :: Software Development :: Libraries' in classifiers

View file

@ -1,121 +0,0 @@
from __future__ import unicode_literals
import re
import sys
import shutil
import tempfile
import unittest
import importlib
import contextlib
import importlib_metadata
try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
try:
import pathlib
except ImportError:
import pathlib2 as pathlib
from importlib_metadata import _hooks
class BasicTests(unittest.TestCase):
    """Sanity checks for Distribution lookup."""

    # Loose X.Y[.Z] match; intentionally not a full PEP 440 parse.
    version_pattern = r'\d+\.\d+(\.\d)?'

    def test_retrieves_version_of_pip(self):
        # Assume pip is installed and retrieve the version of pip.
        dist = importlib_metadata.Distribution.from_name('pip')
        assert isinstance(dist.version, str)
        assert re.match(self.version_pattern, dist.version)

    def test_for_name_does_not_exist(self):
        # Unknown distribution names raise PackageNotFoundError.
        with self.assertRaises(importlib_metadata.PackageNotFoundError):
            importlib_metadata.Distribution.from_name('does-not-exist')

    def test_new_style_classes(self):
        # Guard against Python 2 old-style classes sneaking in.
        self.assertIsInstance(importlib_metadata.Distribution, type)
        self.assertIsInstance(_hooks.MetadataPathFinder, type)
        self.assertIsInstance(_hooks.WheelMetadataFinder, type)
        self.assertIsInstance(_hooks.WheelDistribution, type)
class ImportTests(unittest.TestCase):
    """Interaction of the metadata hooks with the import system."""

    def test_import_nonexistent_module(self):
        # Ensure that the MetadataPathFinder does not crash an import of a
        # non-existent module.
        with self.assertRaises(ImportError):
            importlib.import_module('does_not_exist')

    def test_resolve(self):
        # resolve() should hand back the callable named by pip's
        # console_scripts entry point.
        entry_points = importlib_metadata.entry_points('pip')
        main = importlib_metadata.resolve(
            entry_points.get('console_scripts', 'pip'))
        import pip._internal
        self.assertEqual(main, pip._internal.main)

    def test_resolve_invalid(self):
        # Strings without a ':' separator are not entry points.
        self.assertRaises(ValueError, importlib_metadata.resolve, 'bogus.ep')
class NameNormalizationTests(unittest.TestCase):
    """Name normalization (dashes, letter case) during metadata lookup."""

    @staticmethod
    def pkg_with_dashes(site_dir):
        """
        Create minimal metadata for a package with dashes
        in the name (and thus underscores in the filename).
        """
        metadata_dir = site_dir / 'my_pkg.dist-info'
        metadata_dir.mkdir()
        metadata = metadata_dir / 'METADATA'
        with metadata.open('w') as strm:
            strm.write('Version: 1.0\n')
        return 'my-pkg'

    @staticmethod
    @contextlib.contextmanager
    def site_dir():
        # Create a temp dir, prepend it to sys.path, and undo both on exit.
        tmpdir = tempfile.mkdtemp()
        sys.path[:0] = [tmpdir]
        try:
            yield pathlib.Path(tmpdir)
        finally:
            sys.path.remove(tmpdir)
            shutil.rmtree(tmpdir)

    def setUp(self):
        self.fixtures = ExitStack()
        self.addCleanup(self.fixtures.close)
        self.site_dir = self.fixtures.enter_context(self.site_dir())

    def test_dashes_in_dist_name_found_as_underscores(self):
        """
        For a package with a dash in the name, the dist-info metadata
        uses underscores in the name. Ensure the metadata loads.
        """
        pkg_name = self.pkg_with_dashes(self.site_dir)
        assert importlib_metadata.version(pkg_name) == '1.0'

    @staticmethod
    def pkg_with_mixed_case(site_dir):
        """
        Create minimal metadata for a package with mixed case
        in the name.
        """
        metadata_dir = site_dir / 'CherryPy.dist-info'
        metadata_dir.mkdir()
        metadata = metadata_dir / 'METADATA'
        with metadata.open('w') as strm:
            strm.write('Version: 1.0\n')
        return 'CherryPy'

    def test_dist_name_found_as_any_case(self):
        """
        Ensure the metadata loads when queried with any case.
        """
        pkg_name = self.pkg_with_mixed_case(self.site_dir)
        assert importlib_metadata.version(pkg_name) == '1.0'
        assert importlib_metadata.version(pkg_name.lower()) == '1.0'
        assert importlib_metadata.version(pkg_name.upper()) == '1.0'

View file

@ -1,42 +0,0 @@
import sys
import unittest
import importlib_metadata
try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
from importlib_resources import path
class BespokeLoader:
    """Minimal loader stand-in exposing only an ``archive`` attribute."""

    archive = 'bespoke'
class TestZip(unittest.TestCase):
    """Metadata lookup for wheels placed directly on sys.path."""

    def setUp(self):
        # Find the path to the example.*.whl so we can add it to the front of
        # sys.path, where we'll then try to find the metadata thereof.
        self.resources = ExitStack()
        self.addCleanup(self.resources.close)
        wheel = self.resources.enter_context(
            path('importlib_metadata.tests.data',
                 'example-21.12-py3-none-any.whl'))
        sys.path.insert(0, str(wheel))
        self.resources.callback(sys.path.pop, 0)

    def test_zip_version(self):
        self.assertEqual(importlib_metadata.version('example'), '21.12')

    def test_zip_entry_points(self):
        parser = importlib_metadata.entry_points('example')
        entry_point = parser.get('console_scripts', 'example')
        self.assertEqual(entry_point, 'example:main')

    def test_missing_metadata(self):
        # Absent metadata files yield None rather than raising.
        distribution = importlib_metadata.distribution('example')
        self.assertIsNone(distribution.read_text('does not exist'))

    def test_case_insensitive(self):
        self.assertEqual(importlib_metadata.version('Example'), '21.12')

View file

@ -1 +0,0 @@
0.7

View file

@ -0,0 +1,36 @@
"""Read resources contained within a package."""
from ._common import (
as_file,
files,
Package,
)
from ._legacy import (
contents,
open_binary,
read_binary,
open_text,
read_text,
is_resource,
path,
Resource,
)
from .abc import ResourceReader
# Names re-exported as this package's public API (kept sorted).
__all__ = [
    'Package',
    'Resource',
    'ResourceReader',
    'as_file',
    'contents',
    'files',
    'is_resource',
    'open_binary',
    'open_text',
    'path',
    'read_binary',
    'read_text',
]

View file

@ -0,0 +1,170 @@
from contextlib import suppress
from io import TextIOWrapper
from . import abc
class SpecLoaderAdapter:
    """
    Adapt a package spec to adapt the underlying loader.
    """

    def __init__(self, spec, adapter=lambda spec: spec.loader):
        # 'adapter' receives the spec and returns the (possibly wrapped)
        # loader; the default simply passes the spec's own loader through.
        self.spec = spec
        self.loader = adapter(spec)

    def __getattr__(self, name):
        # Delegate every other attribute to the wrapped spec.
        return getattr(self.spec, name)
class TraversableResourcesLoader:
    """
    Adapt a loader to provide TraversableResources.
    """

    def __init__(self, spec):
        self.spec = spec

    def get_resource_reader(self, name):
        # 'name' is accepted for interface compatibility but unused;
        # the reader is derived from the stored spec.
        return CompatibilityFiles(self.spec)._native()
def _io_wrapper(file, mode='r', *args, **kwargs):
if mode == 'r':
return TextIOWrapper(file, *args, **kwargs)
elif mode == 'rb':
return file
raise ValueError(
"Invalid mode value '{}', only 'r' and 'rb' are supported".format(mode)
)
class CompatibilityFiles:
    """
    Adapter for an existing or non-existent resource reader
    to provide a compatibility .files().
    """

    class SpecPath(abc.Traversable):
        """
        Path tied to a module spec.

        Can be read and exposes the resource reader children.
        """

        def __init__(self, spec, reader):
            self._spec = spec
            self._reader = reader

        def iterdir(self):
            # With no reader, behave like an empty directory.
            if not self._reader:
                return iter(())
            return iter(
                CompatibilityFiles.ChildPath(self._reader, path)
                for path in self._reader.contents()
            )

        def is_file(self):
            return False

        is_dir = is_file

        def joinpath(self, other):
            # With no reader, children are unreadable orphan paths.
            if not self._reader:
                return CompatibilityFiles.OrphanPath(other)
            return CompatibilityFiles.ChildPath(self._reader, other)

        @property
        def name(self):
            return self._spec.name

        def open(self, mode='r', *args, **kwargs):
            # NOTE(review): passes None as the resource name; presumably
            # the reader's open_resource tolerates that -- confirm.
            return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)

    class ChildPath(abc.Traversable):
        """
        Path tied to a resource reader child.

        Can be read but doesn't expose any meaningful children.
        """

        def __init__(self, reader, name):
            self._reader = reader
            self._name = name

        def iterdir(self):
            return iter(())

        def is_file(self):
            return self._reader.is_resource(self.name)

        def is_dir(self):
            return not self.is_file()

        def joinpath(self, other):
            # Joining below a child yields an unreadable orphan path.
            return CompatibilityFiles.OrphanPath(self.name, other)

        @property
        def name(self):
            return self._name

        def open(self, mode='r', *args, **kwargs):
            return _io_wrapper(
                self._reader.open_resource(self.name), mode, *args, **kwargs
            )

    class OrphanPath(abc.Traversable):
        """
        Orphan path, not tied to a module spec or resource reader.

        Can't be read and doesn't expose any meaningful children.
        """

        def __init__(self, *path_parts):
            if len(path_parts) < 1:
                raise ValueError('Need at least one path part to construct a path')
            self._path = path_parts

        def iterdir(self):
            return iter(())

        def is_file(self):
            return False

        is_dir = is_file

        def joinpath(self, other):
            return CompatibilityFiles.OrphanPath(*self._path, other)

        @property
        def name(self):
            return self._path[-1]

        def open(self, mode='r', *args, **kwargs):
            raise FileNotFoundError("Can't open orphan path")

    def __init__(self, spec):
        self.spec = spec

    @property
    def _reader(self):
        # Falls through to returning None when the loader has no
        # get_resource_reader attribute.
        with suppress(AttributeError):
            return self.spec.loader.get_resource_reader(self.spec.name)

    def _native(self):
        """
        Return the native reader if it supports files().
        """
        reader = self._reader
        return reader if hasattr(reader, 'files') else self

    def __getattr__(self, attr):
        # Delegate unknown attributes to the underlying reader.
        return getattr(self._reader, attr)

    def files(self):
        return CompatibilityFiles.SpecPath(self.spec, self._reader)
def wrap_spec(package):
    """
    Construct a package spec with traversable compatibility
    on the spec/loader/reader.
    """
    # The adapted loader's get_resource_reader always yields a
    # files()-capable reader (falling back to CompatibilityFiles).
    return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)

View file

@ -0,0 +1,207 @@
import os
import pathlib
import tempfile
import functools
import contextlib
import types
import importlib
import inspect
import warnings
import itertools
from typing import Union, Optional, cast
from .abc import ResourceReader, Traversable
from ._compat import wrap_spec
Package = Union[types.ModuleType, str]
Anchor = Package
def package_to_anchor(func):
    """
    Replace 'package' parameter as 'anchor' and warn about the change.

    Other errors should fall through.

    >>> files('a', 'b')
    Traceback (most recent call last):
    TypeError: files() takes from 0 to 1 positional arguments but 2 were given
    """
    _missing = object()

    @functools.wraps(func)
    def wrapper(anchor=_missing, package=_missing):
        if package is _missing:
            # Normal path: no legacy 'package' argument supplied.
            return func() if anchor is _missing else func(anchor)
        if anchor is not _missing:
            # Both supplied -- let func raise its natural TypeError.
            return func(anchor, package)
        warnings.warn(
            "First parameter to files is renamed to 'anchor'",
            DeprecationWarning,
            stacklevel=2,
        )
        return func(package)

    return wrapper
@package_to_anchor
def files(anchor: Optional[Anchor] = None) -> Traversable:
    """
    Get a Traversable resource for an anchor.

    ``anchor`` may be a module, an importable module name, or None to
    mean the caller's own module (inferred from the stack).
    """
    return from_package(resolve(anchor))
def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]:
    """
    Return the package's loader if it's a ResourceReader.
    """
    # We can't use
    # a issubclass() check here because apparently abc.'s __subclasscheck__()
    # hook wants to create a weak reference to the object, but
    # zipimport.zipimporter does not support weak references, resulting in a
    # TypeError. That seems terrible.
    spec = package.__spec__
    reader = getattr(spec.loader, 'get_resource_reader', None)  # type: ignore
    if reader is None:
        return None
    # The hook itself may still decline and return None (hence Optional).
    return reader(spec.name)  # type: ignore
@functools.singledispatch
def resolve(cand: Optional[Anchor]) -> types.ModuleType:
    # Base case: assume we already have a module object.
    return cast(types.ModuleType, cand)


@resolve.register
def _(cand: str) -> types.ModuleType:
    # A dotted module name -- import it.
    return importlib.import_module(cand)


@resolve.register
def _(cand: None) -> types.ModuleType:
    # No anchor given: infer the caller's module from the stack.
    return resolve(_infer_caller().f_globals['__name__'])
def _infer_caller():
"""
Walk the stack and find the frame of the first caller not in this module.
"""
def is_this_file(frame_info):
return frame_info.filename == __file__
def is_wrapper(frame_info):
return frame_info.function == 'wrapper'
not_this_file = itertools.filterfalse(is_this_file, inspect.stack())
# also exclude 'wrapper' due to singledispatch in the call stack
callers = itertools.filterfalse(is_wrapper, not_this_file)
return next(callers).frame
def from_package(package: types.ModuleType):
    """
    Return a Traversable object for the given package.
    """
    # wrap_spec guarantees get_resource_reader yields a files()-capable
    # reader (falling back to CompatibilityFiles).
    spec = wrap_spec(package)
    reader = spec.loader.get_resource_reader(spec.name)
    return reader.files()
@contextlib.contextmanager
def _tempfile(
reader,
suffix='',
# gh-93353: Keep a reference to call os.remove() in late Python
# finalization.
*,
_os_remove=os.remove,
):
# Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
# blocks due to the need to close the temporary file to work on Windows
# properly.
fd, raw_path = tempfile.mkstemp(suffix=suffix)
try:
try:
os.write(fd, reader())
finally:
os.close(fd)
del reader
yield pathlib.Path(raw_path)
finally:
try:
_os_remove(raw_path)
except FileNotFoundError:
pass
def _temp_file(path):
    # Materialize a (possibly virtual) file as a real temporary file.
    return _tempfile(path.read_bytes, suffix=path.name)
def _is_present_dir(path: Traversable) -> bool:
    """
    Some Traversables implement ``is_dir()`` to raise an
    exception (i.e. ``FileNotFoundError``) when the
    directory doesn't exist. This function wraps that call
    to always return a boolean and only return True
    if there's a dir and it exists.
    """
    with contextlib.suppress(FileNotFoundError):
        return path.is_dir()
    # Reached only when is_dir() raised FileNotFoundError.
    return False
@functools.singledispatch
def as_file(path):
    """
    Given a Traversable object, return that object as a
    path on the local file system in a context manager.
    """
    # Directories are replicated recursively; files are copied one-shot.
    return _temp_dir(path) if _is_present_dir(path) else _temp_file(path)


@as_file.register(pathlib.Path)
@contextlib.contextmanager
def _(path):
    """
    Degenerate behavior for pathlib.Path objects.
    """
    # Already a real filesystem path -- no copy required.
    yield path
@contextlib.contextmanager
def _temp_path(dir: tempfile.TemporaryDirectory):
"""
Wrap tempfile.TemporyDirectory to return a pathlib object.
"""
with dir as result:
yield pathlib.Path(result)
@contextlib.contextmanager
def _temp_dir(path):
    """
    Given a traversable dir, recursively replicate the whole tree
    to the file system in a context manager.
    """
    # Caller contract: only invoked after _is_present_dir(path) was True.
    assert path.is_dir()
    with _temp_path(tempfile.TemporaryDirectory()) as temp_dir:
        yield _write_contents(temp_dir, path)
def _write_contents(target, source):
child = target.joinpath(source.name)
if source.is_dir():
child.mkdir()
for item in source.iterdir():
_write_contents(child, item)
else:
child.open('wb').write(source.read_bytes())
return child

View file

@ -0,0 +1,108 @@
# flake8: noqa
import abc
import os
import sys
import pathlib
from contextlib import suppress
from typing import Union
if sys.version_info >= (3, 10):
from zipfile import Path as ZipPath # type: ignore
else:
from zipp import Path as ZipPath # type: ignore
try:
from typing import runtime_checkable # type: ignore
except ImportError:
def runtime_checkable(cls): # type: ignore
return cls
try:
from typing import Protocol # type: ignore
except ImportError:
Protocol = abc.ABC # type: ignore
class TraversableResourcesLoader:
    """
    Adapt loaders to provide TraversableResources and other
    compatibility.

    Used primarily for Python 3.9 and earlier where the native
    loaders do not yet implement TraversableResources.
    """

    def __init__(self, spec):
        self.spec = spec

    @property
    def path(self):
        return self.spec.origin

    def get_resource_reader(self, name):
        # Imported lazily; presumably to avoid import cycles at module
        # load time -- confirm before moving to the top of the file.
        from . import readers, _adapters

        def _zip_reader(spec):
            # suppress() makes each helper yield None when its loader
            # shape doesn't match.
            with suppress(AttributeError):
                return readers.ZipReader(spec.loader, spec.name)

        def _namespace_reader(spec):
            with suppress(AttributeError, ValueError):
                return readers.NamespaceReader(spec.submodule_search_locations)

        def _available_reader(spec):
            with suppress(AttributeError):
                return spec.loader.get_resource_reader(spec.name)

        def _native_reader(spec):
            reader = _available_reader(spec)
            return reader if hasattr(reader, 'files') else None

        def _file_reader(spec):
            try:
                path = pathlib.Path(self.path)
            except TypeError:
                return None
            if path.exists():
                return readers.FileReader(self)

        # First non-None candidate wins; order encodes preference.
        return (
            # native reader if it supplies 'files'
            _native_reader(self.spec)
            or
            # local ZipReader if a zip module
            _zip_reader(self.spec)
            or
            # local NamespaceReader if a namespace module
            _namespace_reader(self.spec)
            or
            # local FileReader
            _file_reader(self.spec)
            # fallback - adapt the spec ResourceReader to TraversableReader
            or _adapters.CompatibilityFiles(self.spec)
        )
def wrap_spec(package):
    """
    Construct a package spec with traversable compatibility
    on the spec/loader/reader.

    Supersedes _adapters.wrap_spec to use TraversableResourcesLoader
    from above for older Python compatibility (<3.10).
    """
    from . import _adapters

    spec = package.__spec__
    return _adapters.SpecLoaderAdapter(spec, TraversableResourcesLoader)
# Alias accepted anywhere a file-system path segment is taken.
if sys.version_info >= (3, 9):
    StrPath = Union[str, os.PathLike[str]]
else:
    # PathLike is only subscriptable at runtime in 3.9+
    StrPath = Union[str, "os.PathLike[str]"]

View file

@ -0,0 +1,35 @@
from itertools import filterfalse
from typing import (
Callable,
Iterable,
Iterator,
Optional,
Set,
TypeVar,
Union,
)
# Type and type variable definitions
_T = TypeVar('_T')
_U = TypeVar('_U')


def unique_everseen(
    iterable: Iterable[_T], key: Optional[Callable[[_T], _U]] = None
) -> Iterator[_T]:
    "List unique elements, preserving order. Remember all elements ever seen."
    # unique_everseen('AAAABBBCCDAABBB') --> A B C D
    # unique_everseen('ABBCcAD', str.lower) --> A B C D
    seen: Set[Union[_T, _U]] = set()
    for element in iterable:
        # Deduplicate on the element itself, or on its key when given.
        marker = element if key is None else key(element)
        if marker not in seen:
            seen.add(marker)
            yield element

View file

@ -0,0 +1,120 @@
import functools
import os
import pathlib
import types
import warnings
from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any
from . import _common
Package = Union[types.ModuleType, str]
Resource = str
def deprecated(func):
    """
    Decorate *func* so that every call emits a DeprecationWarning
    directing users to the ``files()`` replacement API.
    """
    message = (
        f"{func.__name__} is deprecated. Use files() instead. "
        "Refer to https://importlib-resources.readthedocs.io"
        "/en/latest/using.html#migrating-from-legacy for migration advice."
    )

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # stacklevel=2 points the warning at the deprecated call site.
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)

    return wrapper
def normalize_path(path: Any) -> str:
    """Normalize a path by ensuring it is a string.

    If the resulting string contains path separators, an exception is raised.
    """
    parent, file_name = os.path.split(str(path))
    if parent:
        # A parent component means the argument was not a bare file name.
        raise ValueError(f'{path!r} must be only a file name')
    return file_name
@deprecated
def open_binary(package: Package, resource: Resource) -> BinaryIO:
    """Return a file-like object opened for binary reading of the resource."""
    # Thin wrapper over the files() API; warning emitted by @deprecated.
    return (_common.files(package) / normalize_path(resource)).open('rb')
@deprecated
def read_binary(package: Package, resource: Resource) -> bytes:
    """Return the binary contents of the resource."""
    # Thin wrapper over the files() API; warning emitted by @deprecated.
    return (_common.files(package) / normalize_path(resource)).read_bytes()
@deprecated
def open_text(
    package: Package,
    resource: Resource,
    encoding: str = 'utf-8',
    errors: str = 'strict',
) -> TextIO:
    """Return a file-like object opened for text reading of the resource."""
    # Thin wrapper over the files() API; warning emitted by @deprecated.
    return (_common.files(package) / normalize_path(resource)).open(
        'r', encoding=encoding, errors=errors
    )
@deprecated
def read_text(
    package: Package,
    resource: Resource,
    encoding: str = 'utf-8',
    errors: str = 'strict',
) -> str:
    """Return the decoded string of the resource.

    The decoding-related arguments have the same semantics as those of
    bytes.decode().
    """
    # Delegate to open_text; the with-block guarantees the file is closed.
    with open_text(package, resource, encoding, errors) as fp:
        return fp.read()
@deprecated
def contents(package: Package) -> Iterable[str]:
    """Return an iterable of entries in `package`.

    Note that not all entries are resources. Specifically, directories are
    not considered resources. Use `is_resource()` on each entry returned here
    to check if it is a resource or not.
    """
    # Materialized as a list so the result can be iterated repeatedly.
    return [path.name for path in _common.files(package).iterdir()]
@deprecated
def is_resource(package: Package, name: str) -> bool:
    """True if `name` is a resource inside `package`.

    Directories are *not* resources.
    """
    resource = normalize_path(name)
    # Scan the package's direct children for a matching file entry.
    return any(
        traversable.name == resource and traversable.is_file()
        for traversable in _common.files(package).iterdir()
    )
@deprecated
def path(
    package: Package,
    resource: Resource,
) -> ContextManager[pathlib.Path]:
    """A context manager providing a file path object to the resource.

    If the resource does not already exist on its own on the file system,
    a temporary file will be created. If the file was created, the file
    will be deleted upon exiting the context manager (no exception is
    raised if the file was deleted prior to the context manager
    exiting).
    """
    # as_file materializes non-file-system resources into a temp file.
    return _common.as_file(_common.files(package) / normalize_path(resource))

View file

@ -0,0 +1,170 @@
import abc
import io
import itertools
import pathlib
from typing import Any, BinaryIO, Iterable, Iterator, NoReturn, Text, Optional
from ._compat import runtime_checkable, Protocol, StrPath
__all__ = ["ResourceReader", "Traversable", "TraversableResources"]
class ResourceReader(metaclass=abc.ABCMeta):
    """Abstract base class for loaders to provide resource reading support."""

    @abc.abstractmethod
    def open_resource(self, resource: Text) -> BinaryIO:
        """Return an opened, file-like object for binary reading.

        The 'resource' argument is expected to represent only a file name.
        If the resource cannot be found, FileNotFoundError is raised.
        """
        # This deliberately raises FileNotFoundError instead of
        # NotImplementedError so that if this method is accidentally called,
        # it'll still do the right thing.
        raise FileNotFoundError

    @abc.abstractmethod
    def resource_path(self, resource: Text) -> Text:
        """Return the file system path to the specified resource.

        The 'resource' argument is expected to represent only a file name.
        If the resource does not exist on the file system, raise
        FileNotFoundError.
        """
        # This deliberately raises FileNotFoundError instead of
        # NotImplementedError so that if this method is accidentally called,
        # it'll still do the right thing.
        raise FileNotFoundError

    @abc.abstractmethod
    def is_resource(self, path: Text) -> bool:
        """Return True if the named 'path' is a resource.

        Files are resources, directories are not.
        """
        # Same rationale as above: fail as "not found" if called directly.
        raise FileNotFoundError

    @abc.abstractmethod
    def contents(self) -> Iterable[str]:
        """Return an iterable of entries in `package`."""
        # Same rationale as above: fail as "not found" if called directly.
        raise FileNotFoundError
class TraversalError(Exception):
    """Raised by Traversable.joinpath when a descendant cannot be found."""
    pass
@runtime_checkable
class Traversable(Protocol):
    """
    An object with a subset of pathlib.Path methods suitable for
    traversing directories and opening files.

    Any exceptions that occur when accessing the backing resource
    may propagate unaltered.
    """

    @abc.abstractmethod
    def iterdir(self) -> Iterator["Traversable"]:
        """
        Yield Traversable objects in self
        """

    def read_bytes(self) -> bytes:
        """
        Read contents of self as bytes
        """
        with self.open('rb') as strm:
            return strm.read()

    def read_text(self, encoding: Optional[str] = None) -> str:
        """
        Read contents of self as text
        """
        with self.open(encoding=encoding) as strm:
            return strm.read()

    @abc.abstractmethod
    def is_dir(self) -> bool:
        """
        Return True if self is a directory
        """

    @abc.abstractmethod
    def is_file(self) -> bool:
        """
        Return True if self is a file
        """

    def joinpath(self, *descendants: StrPath) -> "Traversable":
        """
        Return Traversable resolved with any descendants applied.

        Each descendant should be a path segment relative to self
        and each may contain multiple levels separated by
        ``posixpath.sep`` (``/``).
        """
        if not descendants:
            return self
        # Flatten every descendant into individual path segments.
        names = itertools.chain.from_iterable(
            path.parts for path in map(pathlib.PurePosixPath, descendants)
        )
        # Resolve only the first segment here; recurse for the remainder.
        target = next(names)
        matches = (
            traversable for traversable in self.iterdir() if traversable.name == target
        )
        try:
            match = next(matches)
        except StopIteration:
            raise TraversalError(
                "Target not found during traversal.", target, list(names)
            )
        return match.joinpath(*names)

    def __truediv__(self, child: StrPath) -> "Traversable":
        """
        Return Traversable child in self
        """
        return self.joinpath(child)

    @abc.abstractmethod
    def open(self, mode='r', *args, **kwargs):
        """
        mode may be 'r' or 'rb' to open as text or binary. Return a handle
        suitable for reading (same as pathlib.Path.open).

        When opening as text, accepts encoding parameters such as those
        accepted by io.TextIOWrapper.
        """

    @property
    @abc.abstractmethod
    def name(self) -> str:
        """
        The base name of this object without any parent references.
        """
class TraversableResources(ResourceReader):
    """
    The required interface for providing traversable
    resources.
    """

    @abc.abstractmethod
    def files(self) -> "Traversable":
        """Return a Traversable object for the loaded package."""

    def open_resource(self, resource: StrPath) -> io.BufferedReader:
        # Delegate to the traversable tree rooted at files().
        target = self.files().joinpath(resource)
        return target.open('rb')

    def resource_path(self, resource: Any) -> NoReturn:
        # Traversable resources need not exist on the file system.
        raise FileNotFoundError(resource)

    def is_resource(self, path: StrPath) -> bool:
        return self.files().joinpath(path).is_file()

    def contents(self) -> Iterator[str]:
        for item in self.files().iterdir():
            yield item.name

View file

@ -0,0 +1,120 @@
import collections
import pathlib
import operator
from . import abc
from ._itertools import unique_everseen
from ._compat import ZipPath
def remove_duplicates(items):
    """Yield the items in order, dropping repeats (first occurrence wins)."""
    # dict preserves insertion order; fromkeys deduplicates.
    return iter(dict.fromkeys(items))
class FileReader(abc.TraversableResources):
    """TraversableResources for a package backed by the file system."""

    def __init__(self, loader):
        # Resources live in the directory containing the module file.
        self.path = pathlib.Path(loader.path).parent

    def resource_path(self, resource):
        """
        Return the file system path to prevent
        `resources.path()` from creating a temporary
        copy.
        """
        return str(self.path / resource)

    def files(self):
        return self.path
class ZipReader(abc.TraversableResources):
    """TraversableResources for modules loaded from a zip archive."""

    def __init__(self, loader, module):
        # Only the terminal segment of the dotted module name contributes
        # to the prefix inside the archive.
        name = module.rpartition('.')[2]
        self.prefix = loader.prefix.replace('\\', '/') + name + '/'
        self.archive = loader.archive

    def open_resource(self, resource):
        try:
            return super().open_resource(resource)
        except KeyError as exc:
            # zipfile signals missing members with KeyError; translate to
            # the FileNotFoundError the resources API promises.
            raise FileNotFoundError(exc.args[0])

    def is_resource(self, path):
        # workaround for `zipfile.Path.is_file` returning true
        # for non-existent paths.
        candidate = self.files().joinpath(path)
        return candidate.is_file() and candidate.exists()

    def files(self):
        return ZipPath(self.archive, self.prefix)
class MultiplexedPath(abc.Traversable):
    """
    Given a series of Traversable objects, implement a merged
    version of the interface across all objects. Useful for
    namespace packages which may be multihomed at a single
    name.
    """

    def __init__(self, *paths):
        self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
        if not self._paths:
            message = 'MultiplexedPath must contain at least one path'
            raise FileNotFoundError(message)
        if not all(path.is_dir() for path in self._paths):
            raise NotADirectoryError('MultiplexedPath only supports directories')

    def iterdir(self):
        # Merge the children of all roots; on a name collision the entry
        # from the earliest root wins (unique_everseen keeps first seen).
        files = (file for path in self._paths for file in path.iterdir())
        return unique_everseen(files, key=operator.attrgetter('name'))

    def read_bytes(self):
        raise FileNotFoundError(f'{self} is not a file')

    def read_text(self, *args, **kwargs):
        raise FileNotFoundError(f'{self} is not a file')

    def is_dir(self):
        return True

    def is_file(self):
        return False

    def joinpath(self, *descendants):
        try:
            # Traversable.joinpath resolves against the merged iterdir().
            return super().joinpath(*descendants)
        except abc.TraversalError:
            # One of the paths did not resolve (a directory does not exist).
            # Just return something that will not exist.
            return self._paths[0].joinpath(*descendants)

    def open(self, *args, **kwargs):
        raise FileNotFoundError(f'{self} is not a file')

    @property
    def name(self):
        # The first root names the merged path.
        return self._paths[0].name

    def __repr__(self):
        paths = ', '.join(f"'{path}'" for path in self._paths)
        return f'MultiplexedPath({paths})'
class NamespaceReader(abc.TraversableResources):
    """TraversableResources for namespace packages (multiple roots)."""

    def __init__(self, namespace_path):
        # Accept only namespace-path objects (their repr mentions
        # 'NamespacePath'); reject plain sequences.
        if 'NamespacePath' not in str(namespace_path):
            raise ValueError('Invalid path')
        self.path = MultiplexedPath(*namespace_path)

    def resource_path(self, resource):
        """
        Return the file system path to prevent
        `resources.path()` from creating a temporary
        copy.
        """
        return str(self.path / resource)

    def files(self):
        return self.path

View file

@ -0,0 +1,106 @@
"""
Interface adapters for low-level readers.
"""
import abc
import io
import itertools
from typing import BinaryIO, List
from .abc import Traversable, TraversableResources
class SimpleReader(abc.ABC):
    """
    The minimum, low-level interface required from a resource
    provider.
    """

    @property
    @abc.abstractmethod
    def package(self) -> str:
        """
        The name of the package for which this reader loads resources.
        """

    @abc.abstractmethod
    def children(self) -> List['SimpleReader']:
        """
        Obtain an iterable of SimpleReader for available
        child containers (e.g. directories).
        """

    @abc.abstractmethod
    def resources(self) -> List[str]:
        """
        Obtain available named resources for this virtual package.
        """

    @abc.abstractmethod
    def open_binary(self, resource: str) -> BinaryIO:
        """
        Obtain a File-like for a named resource.
        """

    @property
    def name(self):
        # The last dotted segment of the package name.
        return self.package.split('.')[-1]
class ResourceContainer(Traversable):
    """
    Traversable container for a package's resources via its reader.
    """

    def __init__(self, reader: SimpleReader):
        self.reader = reader

    def is_dir(self):
        return True

    def is_file(self):
        return False

    def iterdir(self):
        # NOTE(review): `self.reader.resources` is declared as a method on
        # SimpleReader but is not called here; unless implementations expose
        # it as a property, this iterates a bound method — confirm upstream.
        files = (ResourceHandle(self, name) for name in self.reader.resources)
        dirs = map(ResourceContainer, self.reader.children())
        return itertools.chain(files, dirs)

    def open(self, *args, **kwargs):
        # Containers are directories; opening one is an error.
        raise IsADirectoryError()
class ResourceHandle(Traversable):
    """
    Handle to a named resource in a ResourceReader.
    """

    def __init__(self, parent: ResourceContainer, name: str):
        self.parent = parent
        self.name = name  # type: ignore

    def is_file(self):
        return True

    def is_dir(self):
        return False

    def open(self, mode='r', *args, **kwargs):
        """
        Open the resource: the reader's binary stream directly for 'rb',
        or wrapped in a TextIOWrapper for text modes.
        """
        stream = self.parent.reader.open_binary(self.name)
        if 'b' not in mode:
            # Bug fix: the binary stream must be passed as the buffer to
            # TextIOWrapper; previously it was dropped (`TextIOWrapper(
            # *args, **kwargs)`), discarding the opened resource.
            stream = io.TextIOWrapper(stream, *args, **kwargs)
        return stream

    def joinpath(self, name):
        # A resource is a leaf; it has no children to traverse into.
        raise RuntimeError("Cannot traverse into a resource")
class TraversableReader(TraversableResources, SimpleReader):
    """
    A TraversableResources based on SimpleReader. Resource providers
    may derive from this class to provide the TraversableResources
    interface by supplying the SimpleReader interface.
    """

    def files(self):
        # The reader itself serves as the root container.
        return ResourceContainer(self)

View file

@ -0,0 +1,32 @@
import os
# Compatibility shims bridging test.support layout changes across Python
# versions (helpers moved into submodules in 3.10).
try:
    from test.support import import_helper  # type: ignore
except ImportError:
    # Python 3.9 and earlier
    class import_helper:  # type: ignore
        # Expose the pre-3.10 flat helpers under the 3.10-style namespace.
        from test.support import (
            modules_setup,
            modules_cleanup,
            DirsOnSysPath,
            CleanImport,
        )


try:
    from test.support import os_helper  # type: ignore
except ImportError:
    # Python 3.9 compat
    class os_helper:  # type:ignore
        from test.support import temp_dir


try:
    # Python 3.10
    from test.support.os_helper import unlink
except ImportError:
    from test.support import unlink as _unlink

    def unlink(target):
        # Older unlink accepted only str paths; coerce PathLike first.
        return _unlink(os.fspath(target))

View file

@ -0,0 +1,50 @@
import pathlib
import functools
####
# from jaraco.path 3.4
def build(spec, prefix=pathlib.Path()):
    """
    Build a set of files/directories, as described by the spec.

    Each key represents a pathname, and the value represents
    the content. Content may be a nested directory.

    >>> spec = {
    ...     'README.txt': "A README file",
    ...     "foo": {
    ...         "__init__.py": "",
    ...         "bar": {
    ...             "__init__.py": "",
    ...         },
    ...         "baz.py": "# Some code",
    ...     }
    ... }
    >>> tmpdir = getfixture('tmpdir')
    >>> build(spec, tmpdir)
    """
    root = pathlib.Path(prefix)
    for name, contents in spec.items():
        create(contents, root / name)


@functools.singledispatch
def create(content, path):
    # Default (mapping) case: make the directory and recurse into it.
    path.mkdir(exist_ok=True)
    build(content, prefix=path)  # type: ignore


@create.register
def _(content: bytes, path):
    # Leaf: binary file.
    path.write_bytes(content)


@create.register
def _(content: str, path):
    # Leaf: text file.
    path.write_text(content)
# end from jaraco.path
####

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1 @@
Hello, UTF-8 world!

View file

@ -0,0 +1 @@
one resource

View file

@ -0,0 +1 @@
two resource

View file

@ -0,0 +1 @@
Hello, UTF-8 world!

View file

@ -0,0 +1,102 @@
import io
import unittest
import importlib_resources as resources
from importlib_resources._adapters import (
CompatibilityFiles,
wrap_spec,
)
from . import util
class CompatibilityFilesTests(unittest.TestCase):
    """Exercise the CompatibilityFiles adapter over a synthetic package."""

    @property
    def package(self):
        # Fresh in-memory package fixture on each access: a BytesIO "file",
        # a fake path, and three child entries.
        bytes_data = io.BytesIO(b'Hello, world!')
        return util.create_package(
            file=bytes_data,
            path='some_path',
            contents=('a', 'b', 'c'),
        )

    @property
    def files(self):
        return resources.files(self.package)

    def test_spec_path_iter(self):
        self.assertEqual(
            sorted(path.name for path in self.files.iterdir()),
            ['a', 'b', 'c'],
        )

    def test_child_path_iter(self):
        self.assertEqual(list((self.files / 'a').iterdir()), [])

    def test_orphan_path_iter(self):
        # Paths below a child resolve to OrphanPath: always empty.
        self.assertEqual(list((self.files / 'a' / 'a').iterdir()), [])
        self.assertEqual(list((self.files / 'a' / 'a' / 'a').iterdir()), [])

    def test_spec_path_is(self):
        self.assertFalse(self.files.is_file())
        self.assertFalse(self.files.is_dir())

    def test_child_path_is(self):
        self.assertTrue((self.files / 'a').is_file())
        self.assertFalse((self.files / 'a').is_dir())

    def test_orphan_path_is(self):
        self.assertFalse((self.files / 'a' / 'a').is_file())
        self.assertFalse((self.files / 'a' / 'a').is_dir())
        self.assertFalse((self.files / 'a' / 'a' / 'a').is_file())
        self.assertFalse((self.files / 'a' / 'a' / 'a').is_dir())

    def test_spec_path_name(self):
        self.assertEqual(self.files.name, 'testingpackage')

    def test_child_path_name(self):
        self.assertEqual((self.files / 'a').name, 'a')

    def test_orphan_path_name(self):
        self.assertEqual((self.files / 'a' / 'b').name, 'b')
        self.assertEqual((self.files / 'a' / 'b' / 'c').name, 'c')

    def test_spec_path_open(self):
        self.assertEqual(self.files.read_bytes(), b'Hello, world!')
        self.assertEqual(self.files.read_text(), 'Hello, world!')

    def test_child_path_open(self):
        self.assertEqual((self.files / 'a').read_bytes(), b'Hello, world!')
        self.assertEqual((self.files / 'a').read_text(), 'Hello, world!')

    def test_orphan_path_open(self):
        with self.assertRaises(FileNotFoundError):
            (self.files / 'a' / 'b').read_bytes()
        with self.assertRaises(FileNotFoundError):
            (self.files / 'a' / 'b' / 'c').read_bytes()

    def test_open_invalid_mode(self):
        with self.assertRaises(ValueError):
            self.files.open('0')

    def test_orphan_path_invalid(self):
        # OrphanPath requires at least one path segment.
        with self.assertRaises(ValueError):
            CompatibilityFiles.OrphanPath()

    def test_wrap_spec(self):
        spec = wrap_spec(self.package)
        self.assertIsInstance(spec.loader.get_resource_reader(None), CompatibilityFiles)
class CompatibilityFilesNoReaderTests(unittest.TestCase):
    """CompatibilityFiles behavior when the loader provides no reader."""

    @property
    def package(self):
        # A package whose loader is None (no resource reader available).
        return util.create_package_from_loader(None)

    @property
    def files(self):
        return resources.files(self.package)

    def test_spec_path_joinpath(self):
        self.assertIsInstance(self.files / 'a', CompatibilityFiles.OrphanPath)

View file

@ -0,0 +1,43 @@
import unittest
import importlib_resources as resources
from . import data01
from . import util
class ContentsTests:
    """Mixin: iterdir() of the `data` fixture must contain these entries."""

    expected = {
        '__init__.py',
        'binary.file',
        'subdirectory',
        'utf-16.file',
        'utf-8.file',
    }

    def test_contents(self):
        contents = {path.name for path in resources.files(self.data).iterdir()}
        # Subset check: the fixture may contain extra incidental entries.
        assert self.expected <= contents


class ContentsDiskTests(ContentsTests, unittest.TestCase):
    # On-disk package fixture.
    def setUp(self):
        self.data = data01


class ContentsZipTests(ContentsTests, util.ZipSetup, unittest.TestCase):
    # Same expectations against the zipped fixture.
    pass


class ContentsNamespaceTests(ContentsTests, unittest.TestCase):
    """Same check for a namespace package fixture."""

    expected = {
        # no __init__ because of namespace design
        # no subdirectory as incidental difference in fixture
        'binary.file',
        'utf-16.file',
        'utf-8.file',
    }

    def setUp(self):
        from . import namespacedata01

        self.data = namespacedata01

View file

@ -0,0 +1,112 @@
import typing
import textwrap
import unittest
import warnings
import importlib
import contextlib
import importlib_resources as resources
from ..abc import Traversable
from . import data01
from . import util
from . import _path
from ._compat import os_helper, import_helper
@contextlib.contextmanager
def suppress_known_deprecation():
    """Record (rather than surface) DeprecationWarnings for the duration."""
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('default', category=DeprecationWarning)
        yield caught
class FilesTests:
    """Mixin exercising the files() API against the `data` fixture."""

    def test_read_bytes(self):
        files = resources.files(self.data)
        actual = files.joinpath('utf-8.file').read_bytes()
        assert actual == b'Hello, UTF-8 world!\n'

    def test_read_text(self):
        files = resources.files(self.data)
        actual = files.joinpath('utf-8.file').read_text(encoding='utf-8')
        assert actual == 'Hello, UTF-8 world!\n'

    @unittest.skipUnless(
        hasattr(typing, 'runtime_checkable'),
        "Only suitable when typing supports runtime_checkable",
    )
    def test_traversable(self):
        assert isinstance(resources.files(self.data), Traversable)

    def test_old_parameter(self):
        """
        Files used to take a 'package' parameter. Make sure anyone
        passing by name is still supported.
        """
        with suppress_known_deprecation():
            resources.files(package=self.data)
class OpenDiskTests(FilesTests, unittest.TestCase):
    # FilesTests against the on-disk package fixture.
    def setUp(self):
        self.data = data01


class OpenZipTests(FilesTests, util.ZipSetup, unittest.TestCase):
    # FilesTests against the zipped fixture.
    pass


class OpenNamespaceTests(FilesTests, unittest.TestCase):
    # FilesTests against the namespace-package fixture.
    def setUp(self):
        from . import namespacedata01

        self.data = namespacedata01
class SiteDir:
    """Fixture mixin: a temp dir placed on sys.path with clean imports."""

    def setUp(self):
        self.fixtures = contextlib.ExitStack()
        self.addCleanup(self.fixtures.close)
        self.site_dir = self.fixtures.enter_context(os_helper.temp_dir())
        self.fixtures.enter_context(import_helper.DirsOnSysPath(self.site_dir))
        self.fixtures.enter_context(import_helper.CleanImport())
class ModulesFilesTests(SiteDir, unittest.TestCase):
    def test_module_resources(self):
        """
        A module can have resources found adjacent to the module.
        """
        spec = {
            'mod.py': '',
            'res.txt': 'resources are the best',
        }
        _path.build(spec, self.site_dir)
        # site_dir is on sys.path (SiteDir fixture), so this import works.
        import mod

        actual = resources.files(mod).joinpath('res.txt').read_text()
        assert actual == spec['res.txt']
class ImplicitContextFilesTests(SiteDir, unittest.TestCase):
    def test_implicit_files(self):
        """
        Without any parameter, files() will infer the location as the caller.
        """
        spec = {
            'somepkg': {
                '__init__.py': textwrap.dedent(
                    """
                    import importlib_resources as res
                    val = res.files().joinpath('res.txt').read_text()
                    """
                ),
                'res.txt': 'resources are the best',
            },
        }
        _path.build(spec, self.site_dir)
        # Importing the package runs __init__, exercising the inference.
        assert importlib.import_module('somepkg').val == 'resources are the best'


if __name__ == '__main__':
    unittest.main()

View file

@ -0,0 +1,81 @@
import unittest
import importlib_resources as resources
from . import data01
from . import util
class CommonBinaryTests(util.CommonTests, unittest.TestCase):
    # Shared open-for-binary smoke test over util.CommonTests fixtures.
    def execute(self, package, path):
        target = resources.files(package).joinpath(path)
        with target.open('rb'):
            pass


class CommonTextTests(util.CommonTests, unittest.TestCase):
    # Shared open-for-text smoke test over util.CommonTests fixtures.
    def execute(self, package, path):
        target = resources.files(package).joinpath(path)
        with target.open():
            pass
class OpenTests:
    """Mixin exercising Traversable.open over the `data` fixture."""

    def test_open_binary(self):
        target = resources.files(self.data) / 'binary.file'
        with target.open('rb') as fp:
            result = fp.read()
        self.assertEqual(result, b'\x00\x01\x02\x03')

    def test_open_text_default_encoding(self):
        target = resources.files(self.data) / 'utf-8.file'
        with target.open() as fp:
            result = fp.read()
        self.assertEqual(result, 'Hello, UTF-8 world!\n')

    def test_open_text_given_encoding(self):
        target = resources.files(self.data) / 'utf-16.file'
        with target.open(encoding='utf-16', errors='strict') as fp:
            result = fp.read()
        self.assertEqual(result, 'Hello, UTF-16 world!\n')

    def test_open_text_with_errors(self):
        # Raises UnicodeError without the 'errors' argument.
        target = resources.files(self.data) / 'utf-16.file'
        with target.open(encoding='utf-8', errors='strict') as fp:
            self.assertRaises(UnicodeError, fp.read)
        with target.open(encoding='utf-8', errors='ignore') as fp:
            result = fp.read()
        # UTF-16 bytes decoded as UTF-8 with errors ignored: NULs remain.
        self.assertEqual(
            result,
            'H\x00e\x00l\x00l\x00o\x00,\x00 '
            '\x00U\x00T\x00F\x00-\x001\x006\x00 '
            '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00',
        )

    def test_open_binary_FileNotFoundError(self):
        target = resources.files(self.data) / 'does-not-exist'
        self.assertRaises(FileNotFoundError, target.open, 'rb')

    def test_open_text_FileNotFoundError(self):
        target = resources.files(self.data) / 'does-not-exist'
        self.assertRaises(FileNotFoundError, target.open)
class OpenDiskTests(OpenTests, unittest.TestCase):
    # OpenTests against the on-disk package fixture.
    def setUp(self):
        self.data = data01


class OpenDiskNamespaceTests(OpenTests, unittest.TestCase):
    # OpenTests against the namespace-package fixture.
    def setUp(self):
        from . import namespacedata01

        self.data = namespacedata01


class OpenZipTests(OpenTests, util.ZipSetup, unittest.TestCase):
    # OpenTests against the zipped fixture.
    pass


if __name__ == '__main__':
    unittest.main()

View file

@ -0,0 +1,64 @@
import io
import unittest
import importlib_resources as resources
from . import data01
from . import util
class CommonTests(util.CommonTests, unittest.TestCase):
    # Shared as_file smoke test over util.CommonTests fixtures.
    def execute(self, package, path):
        with resources.as_file(resources.files(package).joinpath(path)):
            pass


class PathTests:
    """Mixin: as_file yields a readable pathlib.Path for the fixture."""

    def test_reading(self):
        # Path should be readable.
        # Test also implicitly verifies the returned object is a pathlib.Path
        # instance.
        target = resources.files(self.data) / 'utf-8.file'
        with resources.as_file(target) as path:
            self.assertTrue(path.name.endswith("utf-8.file"), repr(path))
            # pathlib.Path.read_text() was introduced in Python 3.5.
            with path.open('r', encoding='utf-8') as file:
                text = file.read()
            self.assertEqual('Hello, UTF-8 world!\n', text)
class PathDiskTests(PathTests, unittest.TestCase):
    data = data01

    def test_natural_path(self):
        """
        Guarantee the internal implementation detail that
        file-system-backed resources do not get the tempdir
        treatment.
        """
        target = resources.files(self.data) / 'utf-8.file'
        with resources.as_file(target) as path:
            assert 'data' in str(path)


class PathMemoryTests(PathTests, unittest.TestCase):
    def setUp(self):
        # Package that exists only in memory: origin/path lookups must fail,
        # forcing as_file to materialize a temporary file.
        file = io.BytesIO(b'Hello, UTF-8 world!\n')
        self.addCleanup(file.close)
        self.data = util.create_package(
            file=file, path=FileNotFoundError("package exists only in memory")
        )
        self.data.__spec__.origin = None
        self.data.__spec__.has_location = False


class PathZipTests(PathTests, util.ZipSetup, unittest.TestCase):
    def test_remove_in_context_manager(self):
        # It is not an error if the file that was temporarily stashed on the
        # file system is removed inside the `with` stanza.
        target = resources.files(self.data) / 'utf-8.file'
        with resources.as_file(target) as path:
            path.unlink()


if __name__ == '__main__':
    unittest.main()

View file

@ -0,0 +1,76 @@
import unittest
import importlib_resources as resources
from . import data01
from . import util
from importlib import import_module
class CommonBinaryTests(util.CommonTests, unittest.TestCase):
    # Shared read_bytes smoke test over util.CommonTests fixtures.
    def execute(self, package, path):
        resources.files(package).joinpath(path).read_bytes()


class CommonTextTests(util.CommonTests, unittest.TestCase):
    # Shared read_text smoke test over util.CommonTests fixtures.
    def execute(self, package, path):
        resources.files(package).joinpath(path).read_text()
class ReadTests:
    """Mixin exercising read_bytes/read_text over the `data` fixture."""

    def test_read_bytes(self):
        result = resources.files(self.data).joinpath('binary.file').read_bytes()
        self.assertEqual(result, b'\0\1\2\3')

    def test_read_text_default_encoding(self):
        result = resources.files(self.data).joinpath('utf-8.file').read_text()
        self.assertEqual(result, 'Hello, UTF-8 world!\n')

    def test_read_text_given_encoding(self):
        result = (
            resources.files(self.data)
            .joinpath('utf-16.file')
            .read_text(encoding='utf-16')
        )
        self.assertEqual(result, 'Hello, UTF-16 world!\n')

    def test_read_text_with_errors(self):
        # Raises UnicodeError without the 'errors' argument.
        target = resources.files(self.data) / 'utf-16.file'
        self.assertRaises(UnicodeError, target.read_text, encoding='utf-8')
        result = target.read_text(encoding='utf-8', errors='ignore')
        # UTF-16 bytes decoded as UTF-8 with errors ignored: NULs remain.
        self.assertEqual(
            result,
            'H\x00e\x00l\x00l\x00o\x00,\x00 '
            '\x00U\x00T\x00F\x00-\x001\x006\x00 '
            '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00',
        )
class ReadDiskTests(ReadTests, unittest.TestCase):
    # ReadTests against the on-disk package fixture.
    data = data01


class ReadZipTests(ReadTests, util.ZipSetup, unittest.TestCase):
    def test_read_submodule_resource(self):
        submodule = import_module('ziptestdata.subdirectory')
        result = resources.files(submodule).joinpath('binary.file').read_bytes()
        self.assertEqual(result, b'\0\1\2\3')

    def test_read_submodule_resource_by_name(self):
        # Same as above, resolving the submodule by dotted name.
        result = (
            resources.files('ziptestdata.subdirectory')
            .joinpath('binary.file')
            .read_bytes()
        )
        self.assertEqual(result, b'\0\1\2\3')


class ReadNamespaceTests(ReadTests, unittest.TestCase):
    # ReadTests against the namespace-package fixture.
    def setUp(self):
        from . import namespacedata01

        self.data = namespacedata01


if __name__ == '__main__':
    unittest.main()

View file

@ -0,0 +1,133 @@
import os.path
import sys
import pathlib
import unittest
from importlib import import_module
from importlib_resources.readers import MultiplexedPath, NamespaceReader
class MultiplexedPathTest(unittest.TestCase):
    """Unit tests for readers.MultiplexedPath over fixture directories."""

    @classmethod
    def setUpClass(cls):
        path = pathlib.Path(__file__).parent / 'namespacedata01'
        cls.folder = str(path)

    def test_init_no_paths(self):
        with self.assertRaises(FileNotFoundError):
            MultiplexedPath()

    def test_init_file(self):
        with self.assertRaises(NotADirectoryError):
            MultiplexedPath(os.path.join(self.folder, 'binary.file'))

    def test_iterdir(self):
        contents = {path.name for path in MultiplexedPath(self.folder).iterdir()}
        # __pycache__ may or may not exist depending on prior runs.
        try:
            contents.remove('__pycache__')
        except (KeyError, ValueError):
            pass
        self.assertEqual(contents, {'binary.file', 'utf-16.file', 'utf-8.file'})

    def test_iterdir_duplicate(self):
        data01 = os.path.abspath(os.path.join(__file__, '..', 'data01'))
        contents = {
            path.name for path in MultiplexedPath(self.folder, data01).iterdir()
        }
        # Compilation artifacts are incidental; ignore them.
        for remove in ('__pycache__', '__init__.pyc'):
            try:
                contents.remove(remove)
            except (KeyError, ValueError):
                pass
        self.assertEqual(
            contents,
            {'__init__.py', 'binary.file', 'subdirectory', 'utf-16.file', 'utf-8.file'},
        )

    def test_is_dir(self):
        self.assertEqual(MultiplexedPath(self.folder).is_dir(), True)

    def test_is_file(self):
        self.assertEqual(MultiplexedPath(self.folder).is_file(), False)

    def test_open_file(self):
        path = MultiplexedPath(self.folder)
        with self.assertRaises(FileNotFoundError):
            path.read_bytes()
        with self.assertRaises(FileNotFoundError):
            path.read_text()
        with self.assertRaises(FileNotFoundError):
            path.open()

    def test_join_path(self):
        prefix = os.path.abspath(os.path.join(__file__, '..'))
        data01 = os.path.join(prefix, 'data01')
        path = MultiplexedPath(self.folder, data01)
        # Entry present in the first root resolves there.
        self.assertEqual(
            str(path.joinpath('binary.file'))[len(prefix) + 1 :],
            os.path.join('namespacedata01', 'binary.file'),
        )
        # Entry present only in the second root resolves there.
        self.assertEqual(
            str(path.joinpath('subdirectory'))[len(prefix) + 1 :],
            os.path.join('data01', 'subdirectory'),
        )
        # Missing entry falls back to the first root.
        self.assertEqual(
            str(path.joinpath('imaginary'))[len(prefix) + 1 :],
            os.path.join('namespacedata01', 'imaginary'),
        )
        self.assertEqual(path.joinpath(), path)

    def test_join_path_compound(self):
        path = MultiplexedPath(self.folder)
        assert not path.joinpath('imaginary/foo.py').exists()

    def test_repr(self):
        self.assertEqual(
            repr(MultiplexedPath(self.folder)),
            f"MultiplexedPath('{self.folder}')",
        )

    def test_name(self):
        self.assertEqual(
            MultiplexedPath(self.folder).name,
            os.path.basename(self.folder),
        )
class NamespaceReaderTest(unittest.TestCase):
    """Unit tests for readers.NamespaceReader using the fixture package."""

    site_dir = str(pathlib.Path(__file__).parent)

    @classmethod
    def setUpClass(cls):
        # Make namespacedata01 importable as a top-level namespace package.
        sys.path.append(cls.site_dir)

    @classmethod
    def tearDownClass(cls):
        sys.path.remove(cls.site_dir)

    def test_init_error(self):
        # Plain lists are rejected; only namespace-path objects are valid.
        with self.assertRaises(ValueError):
            NamespaceReader(['path1', 'path2'])

    def test_resource_path(self):
        namespacedata01 = import_module('namespacedata01')
        reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations)
        root = os.path.abspath(os.path.join(__file__, '..', 'namespacedata01'))
        self.assertEqual(
            reader.resource_path('binary.file'), os.path.join(root, 'binary.file')
        )
        # resource_path does not verify existence.
        self.assertEqual(
            reader.resource_path('imaginary'), os.path.join(root, 'imaginary')
        )

    def test_files(self):
        namespacedata01 = import_module('namespacedata01')
        reader = NamespaceReader(namespacedata01.__spec__.submodule_search_locations)
        root = os.path.abspath(os.path.join(__file__, '..', 'namespacedata01'))
        self.assertIsInstance(reader.files(), MultiplexedPath)
        self.assertEqual(repr(reader.files()), f"MultiplexedPath('{root}')")


if __name__ == '__main__':
    unittest.main()

View file

@ -0,0 +1,260 @@
import sys
import unittest
import importlib_resources as resources
import uuid
import pathlib
from . import data01
from . import zipdata01, zipdata02
from . import util
from importlib import import_module
from ._compat import import_helper, unlink
class ResourceTests:
    """
    Assertions shared by the disk- and zip-backed test cases.

    Subclasses are expected to set the ``data`` attribute (the anchor
    package) in their setUp.
    """

    def test_is_file_exists(self):
        # An existing resource reports is_file().
        self.assertTrue((resources.files(self.data) / 'binary.file').is_file())

    def test_is_file_missing(self):
        # A missing name does not report is_file().
        self.assertFalse((resources.files(self.data) / 'not-a-file').is_file())

    def test_is_dir(self):
        subdirectory = resources.files(self.data) / 'subdirectory'
        self.assertFalse(subdirectory.is_file())
        self.assertTrue(subdirectory.is_dir())
class ResourceDiskTests(ResourceTests, unittest.TestCase):
    """Run the shared ResourceTests against the on-disk data01 package."""

    def setUp(self):
        self.data = data01
class ResourceZipTests(ResourceTests, util.ZipSetup, unittest.TestCase):
    """Run the shared ResourceTests against zipped data (via util.ZipSetup)."""

    pass
def names(traversable):
    """Return the set of entry names directly under *traversable*."""
    return set(map(lambda entry: entry.name, traversable.iterdir()))
class ResourceLoaderTests(unittest.TestCase):
    """Resource access through a synthetic package backed by util.Reader."""

    def _package(self, contents):
        # Every test here anchors on data01, varying only the contents.
        return util.create_package(
            file=data01, path=data01.__file__, contents=contents
        )

    def test_resource_contents(self):
        package = self._package(['A', 'B', 'C'])
        self.assertEqual(names(resources.files(package)), {'A', 'B', 'C'})

    def test_is_file(self):
        package = self._package(['A', 'B', 'C', 'D/E', 'D/F'])
        self.assertTrue(resources.files(package).joinpath('B').is_file())

    def test_is_dir(self):
        package = self._package(['A', 'B', 'C', 'D/E', 'D/F'])
        self.assertTrue(resources.files(package).joinpath('D').is_dir())

    def test_resource_missing(self):
        package = self._package(['A', 'B', 'C', 'D/E', 'D/F'])
        self.assertFalse(resources.files(package).joinpath('Z').is_file())
class ResourceCornerCaseTests(unittest.TestCase):
    """Behavior for packages that fit none of the usual reader categories."""

    def test_package_has_no_reader_fallback(self):
        # Test odd ball packages which:
        # 1. Do not have a ResourceReader as a loader
        # 2. Are not on the file system
        # 3. Are not in a zip file
        module = util.create_package(
            file=data01, path=data01.__file__, contents=['A', 'B', 'C']
        )
        # Give the module a dummy loader.
        module.__loader__ = object()
        # Give the module a dummy origin.
        module.__file__ = '/path/which/shall/not/be/named'
        # Keep spec and module attributes consistent, as import would.
        module.__spec__.loader = module.__loader__
        module.__spec__.origin = module.__file__
        self.assertFalse(resources.files(module).joinpath('A').is_file())
class ResourceFromZipsTest01(util.ZipSetupBase, unittest.TestCase):
    """Resource access into the single-package ziptestdata.zip (zipdata01)."""

    ZIP_MODULE = zipdata01  # type: ignore

    def test_is_submodule_resource(self):
        submodule = import_module('ziptestdata.subdirectory')
        self.assertTrue(resources.files(submodule).joinpath('binary.file').is_file())

    def test_read_submodule_resource_by_name(self):
        # Dotted-name anchors work without importing the module first here.
        self.assertTrue(
            resources.files('ziptestdata.subdirectory')
            .joinpath('binary.file')
            .is_file()
        )

    def test_submodule_contents(self):
        submodule = import_module('ziptestdata.subdirectory')
        self.assertEqual(
            names(resources.files(submodule)), {'__init__.py', 'binary.file'}
        )

    def test_submodule_contents_by_name(self):
        self.assertEqual(
            names(resources.files('ziptestdata.subdirectory')),
            {'__init__.py', 'binary.file'},
        )

    def test_as_file_directory(self):
        # as_file materializes a whole directory from the zip onto disk
        # for the duration of the context.
        with resources.as_file(resources.files('ziptestdata')) as data:
            assert data.name == 'ziptestdata'
            assert data.is_dir()
            assert data.joinpath('subdirectory').is_dir()
            assert len(list(data.iterdir()))
        # The temporary parent directory is cleaned up on exit.
        assert not data.parent.exists()
class ResourceFromZipsTest02(util.ZipSetupBase, unittest.TestCase):
    """Resource access into the two-package ziptestdata.zip (zipdata02)."""

    ZIP_MODULE = zipdata02  # type: ignore

    def test_unrelated_contents(self):
        """
        Test that a zip with two unrelated subpackages returns
        distinct resources. Ref python/importlib_resources#44.
        """
        self.assertEqual(
            names(resources.files('ziptestdata.one')),
            {'__init__.py', 'resource1.txt'},
        )
        self.assertEqual(
            names(resources.files('ziptestdata.two')),
            {'__init__.py', 'resource2.txt'},
        )
class DeletingZipsTest(unittest.TestCase):
    """Having accessed resources in a zip file should not keep an open
    reference to the zip.

    Each test performs one access and then unlinks the zip; on platforms
    where an open file cannot be deleted, a retained handle would make
    the unlink fail.
    """

    ZIP_MODULE = zipdata01

    def setUp(self):
        # Snapshot sys.modules so imports performed here are rolled back.
        modules = import_helper.modules_setup()
        self.addCleanup(import_helper.modules_cleanup, *modules)
        # Work on a uniquely-named copy of the zip so each test may
        # delete it without disturbing the checked-in data.
        data_path = pathlib.Path(self.ZIP_MODULE.__file__)
        data_dir = data_path.parent
        self.source_zip_path = data_dir / 'ziptestdata.zip'
        self.zip_path = pathlib.Path(f'{uuid.uuid4()}.zip').absolute()
        self.zip_path.write_bytes(self.source_zip_path.read_bytes())
        sys.path.append(str(self.zip_path))
        self.data = import_module('ziptestdata')

    def tearDown(self):
        # Undo the sys.path / importer-cache / sys.modules mutations from
        # setUp; each step tolerates the state already being gone.
        try:
            sys.path.remove(str(self.zip_path))
        except ValueError:
            pass

        try:
            del sys.path_importer_cache[str(self.zip_path)]
            del sys.modules[self.data.__name__]
        except KeyError:
            pass

        try:
            unlink(self.zip_path)
        except OSError:
            # If the test fails, this will probably fail too
            pass

    def test_iterdir_does_not_keep_open(self):
        c = [item.name for item in resources.files('ziptestdata').iterdir()]
        self.zip_path.unlink()
        del c

    def test_is_file_does_not_keep_open(self):
        c = resources.files('ziptestdata').joinpath('binary.file').is_file()
        self.zip_path.unlink()
        del c

    def test_is_file_failure_does_not_keep_open(self):
        c = resources.files('ziptestdata').joinpath('not-present').is_file()
        self.zip_path.unlink()
        del c

    @unittest.skip("Desired but not supported.")
    def test_as_file_does_not_keep_open(self):  # pragma: no cover
        c = resources.as_file(resources.files('ziptestdata') / 'binary.file')
        self.zip_path.unlink()
        del c

    def test_entered_path_does_not_keep_open(self):
        # This is what certifi does on import to make its bundle
        # available for the process duration.
        c = resources.as_file(
            resources.files('ziptestdata') / 'binary.file'
        ).__enter__()
        self.zip_path.unlink()
        del c

    def test_read_binary_does_not_keep_open(self):
        c = resources.files('ziptestdata').joinpath('binary.file').read_bytes()
        self.zip_path.unlink()
        del c

    def test_read_text_does_not_keep_open(self):
        c = resources.files('ziptestdata').joinpath('utf-8.file').read_text()
        self.zip_path.unlink()
        del c
class ResourceFromNamespaceTest01(unittest.TestCase):
    """Resource access anchored on the 'namespacedata01' namespace package."""

    # Directory containing this test module; added to sys.path so that
    # 'namespacedata01' is importable as a namespace package.
    site_dir = str(pathlib.Path(__file__).parent)

    @classmethod
    def setUpClass(cls):
        sys.path.append(cls.site_dir)

    @classmethod
    def tearDownClass(cls):
        sys.path.remove(cls.site_dir)

    def test_is_submodule_resource(self):
        self.assertTrue(
            resources.files(import_module('namespacedata01'))
            .joinpath('binary.file')
            .is_file()
        )

    def test_read_submodule_resource_by_name(self):
        self.assertTrue(
            resources.files('namespacedata01').joinpath('binary.file').is_file()
        )

    def test_submodule_contents(self):
        contents = names(resources.files(import_module('namespacedata01')))
        # A __pycache__ entry may or may not be present; ignore it.
        try:
            contents.remove('__pycache__')
        except KeyError:
            pass
        self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'})

    def test_submodule_contents_by_name(self):
        contents = names(resources.files('namespacedata01'))
        # A __pycache__ entry may or may not be present; ignore it.
        try:
            contents.remove('__pycache__')
        except KeyError:
            pass
        self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'})
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()

View file

@ -0,0 +1,53 @@
"""
Generate the zip test data files.
Run to build the tests/zipdataNN/ziptestdata.zip files from
files in tests/dataNN.
Replaces the file with the working copy, but does not commit anything
to the source repo.
"""
import contextlib
import os
import pathlib
import zipfile
def main():
    """
    Regenerate the zip data files for both suffixes.

    >>> from unittest import mock
    >>> monkeypatch = getfixture('monkeypatch')
    >>> monkeypatch.setattr(zipfile, 'ZipFile', mock.MagicMock())
    >>> print(); main()  # print workaround for bpo-32509
    <BLANKLINE>
    ...data01... -> ziptestdata/...
    ...
    ...data02... -> ziptestdata/...
    ...
    """
    suffixes = '01', '02'
    # tuple() forces the map to run; generate is called for its side effects.
    tuple(map(generate, suffixes))
def generate(suffix):
    """Build zipdata{suffix}/ziptestdata.zip from the data{suffix} tree."""
    # Paths are made relative to cwd so the printed doctest output is stable.
    root = pathlib.Path(__file__).parent.relative_to(os.getcwd())
    zfpath = root / f'zipdata{suffix}/ziptestdata.zip'
    with zipfile.ZipFile(zfpath, 'w') as zf:
        for src, rel in walk(root / f'data{suffix}'):
            # Archive everything under a top-level 'ziptestdata' prefix,
            # with POSIX-style entry paths.
            dst = 'ziptestdata' / pathlib.PurePosixPath(rel.as_posix())
            print(src, '->', dst)
            zf.write(src, dst)
def walk(datapath):
    """
    Yield (path, relative-path) pairs for every file under *datapath*,
    pruning any __pycache__ directories from the traversal.
    """
    for dirpath, dirnames, filenames in os.walk(datapath):
        if '__pycache__' in dirnames:
            dirnames.remove('__pycache__')
        for name in filenames:
            full = pathlib.Path(dirpath) / name
            yield full, full.relative_to(datapath)
__name__ == '__main__' and main()

View file

@ -0,0 +1,167 @@
import abc
import importlib
import io
import sys
import types
import pathlib
from . import data01
from . import zipdata01
from ..abc import ResourceReader
from ._compat import import_helper
from importlib.machinery import ModuleSpec
class Reader(ResourceReader):
    """
    A scriptable ResourceReader test double.

    Behavior is configured via keyword arguments stored directly as
    attributes (typically ``file``, ``path`` and ``_contents``).  When
    ``file`` or ``path`` holds an Exception instance, the corresponding
    method raises it instead of returning it.
    """

    def __init__(self, **kwargs):
        vars(self).update(kwargs)

    def get_resource_reader(self, package):
        return self

    def open_resource(self, path):
        # Record the requested path so tests can assert on it later.
        self._path = path
        if isinstance(self.file, Exception):
            raise self.file
        return self.file

    def resource_path(self, path_):
        self._path = path_
        if isinstance(self.path, Exception):
            raise self.path
        return self.path

    def is_resource(self, path_):
        self._path = path_
        if isinstance(self.path, Exception):
            raise self.path

        def part(entry):
            return entry.split('/')

        # Only top-level (single-segment) entries count as resources.
        return any(
            len(parts) == 1 and parts[0] == path_ for parts in map(part, self._contents)
        )

    def contents(self):
        if isinstance(self.path, Exception):
            raise self.path
        yield from self._contents
def create_package_from_loader(loader, is_package=True):
    """Return a synthetic module named 'testingpackage' bound to *loader*."""
    pkg_name = 'testingpackage'
    spec = ModuleSpec(pkg_name, loader, origin='does-not-exist', is_package=is_package)
    pkg = types.ModuleType(pkg_name)
    pkg.__spec__ = spec
    pkg.__loader__ = loader
    return pkg
def create_package(file=None, path=None, is_package=True, contents=()):
    """Build a synthetic package whose loader is a configured Reader double."""
    loader = Reader(file=file, path=path, _contents=contents)
    return create_package_from_loader(loader, is_package)
class CommonTests(metaclass=abc.ABCMeta):
    """
    Tests shared by test_open, test_path, and test_read.

    Concrete subclasses implement ``execute`` with the legacy API
    function under test.
    """

    @abc.abstractmethod
    def execute(self, package, path):
        """
        Call the pertinent legacy API function (e.g. open_text, path)
        on package and path.
        """

    def test_package_name(self):
        # Passing in the package name should succeed.
        self.execute(data01.__name__, 'utf-8.file')

    def test_package_object(self):
        # Passing in the package itself should succeed.
        self.execute(data01, 'utf-8.file')

    def test_string_path(self):
        # Passing in a string for the path should succeed.
        path = 'utf-8.file'
        self.execute(data01, path)

    def test_pathlib_path(self):
        # Passing in a pathlib.PurePath object for the path should succeed.
        path = pathlib.PurePath('utf-8.file')
        self.execute(data01, path)

    def test_importing_module_as_side_effect(self):
        # The anchor package can already be imported.
        del sys.modules[data01.__name__]
        self.execute(data01.__name__, 'utf-8.file')

    def test_missing_path(self):
        # Attempting to open or read or request the path for a
        # non-existent path should succeed if open_resource
        # can return a viable data stream.
        bytes_data = io.BytesIO(b'Hello, world!')
        package = create_package(file=bytes_data, path=FileNotFoundError())
        self.execute(package, 'utf-8.file')
        self.assertEqual(package.__loader__._path, 'utf-8.file')

    def test_extant_path(self):
        # Attempting to open or read or request the path when the
        # path does exist should still succeed. Does not assert
        # anything about the result.
        bytes_data = io.BytesIO(b'Hello, world!')
        # any path that exists
        path = __file__
        package = create_package(file=bytes_data, path=path)
        self.execute(package, 'utf-8.file')
        self.assertEqual(package.__loader__._path, 'utf-8.file')

    def test_useless_loader(self):
        # When both open_resource and resource_path raise, the error
        # propagates to the caller.
        package = create_package(file=FileNotFoundError(), path=FileNotFoundError())
        with self.assertRaises(FileNotFoundError):
            self.execute(package, 'utf-8.file')
class ZipSetupBase:
    """
    Mixin that puts a ZIP_MODULE's ziptestdata.zip on sys.path for the
    duration of the test class and imports 'ziptestdata' from it.
    """

    ZIP_MODULE = None

    @classmethod
    def setUpClass(cls):
        data_path = pathlib.Path(cls.ZIP_MODULE.__file__)
        data_dir = data_path.parent
        cls._zip_path = str(data_dir / 'ziptestdata.zip')
        sys.path.append(cls._zip_path)
        cls.data = importlib.import_module('ziptestdata')

    @classmethod
    def tearDownClass(cls):
        # Undo the sys.path / importer-cache / sys.modules / class-attribute
        # mutations from setUpClass; each step tolerates missing state.
        try:
            sys.path.remove(cls._zip_path)
        except ValueError:
            pass

        try:
            del sys.path_importer_cache[cls._zip_path]
            del sys.modules[cls.data.__name__]
        except KeyError:
            pass

        try:
            del cls.data
            del cls._zip_path
        except AttributeError:
            pass

    def setUp(self):
        # Snapshot sys.modules so per-test imports are rolled back.
        modules = import_helper.modules_setup()
        self.addCleanup(import_helper.modules_cleanup, *modules)
class ZipSetup(ZipSetupBase):
    """ZipSetupBase bound to the zipdata01 fixture."""

    ZIP_MODULE = zipdata01  # type: ignore

View file

@ -0,0 +1,10 @@
"""
Demonstrate jaraco.windows ReplaceFile: replace 'orig-file' with
'replacing-file', keeping the prior contents in 'orig-backup'.
"""
import os

from jaraco.windows.api.filesystem import ReplaceFile

# Use context managers so the write handles are closed before ReplaceFile
# runs; an open handle could make the replace fail on Windows (TODO confirm),
# and the originals leaked the file objects.
with open('orig-file', 'w') as strm:
    strm.write('some content')
with open('replacing-file', 'w') as strm:
    strm.write('new content')

ReplaceFile('orig-file', 'replacing-file', 'orig-backup', 0, 0, 0)

# The replacement's content is now in orig-file, the original content
# is preserved in orig-backup, and the replacing file is gone.
with open('orig-file') as strm:
    assert strm.read() == 'new content'
with open('orig-backup') as strm:
    assert strm.read() == 'some content'
assert not os.path.exists('replacing-file')

View file

@ -0,0 +1,22 @@
from jaraco.windows.filesystem import trace_symlink_target
from optparse import OptionParser
def get_args():
    """
    Parse command-line arguments, requiring one positional filename.

    Exits with a usage error (via parser.error) when no filename is given.
    """
    parser = OptionParser()
    options, positional = parser.parse_args()
    if not positional:
        parser.error('filename required')
    options.filename = positional.pop(0)
    return options
def main():
    """Print the resolved target of the symlink named on the command line."""
    options = get_args()
    print(trace_symlink_target(options.filename))


if __name__ == '__main__':
    main()

3991
libs/win/inflect/__init__.py Normal file

File diff suppressed because it is too large Load diff

View file

View file

@ -1 +0,0 @@
import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));importlib = has_mfs and __import__('importlib.util');has_mfs and __import__('importlib.machinery');m = has_mfs and sys.modules.setdefault('jaraco', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('jaraco', [os.path.dirname(p)])));m = m or sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)

View file

@ -1 +0,0 @@
import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));importlib = has_mfs and __import__('importlib.util');has_mfs and __import__('importlib.machinery');m = has_mfs and sys.modules.setdefault('jaraco', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('jaraco', [os.path.dirname(p)])));m = m or sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)

View file

@ -1 +0,0 @@
import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));importlib = has_mfs and __import__('importlib.util');has_mfs and __import__('importlib.machinery');m = has_mfs and sys.modules.setdefault('jaraco', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('jaraco', [os.path.dirname(p)])));m = m or sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)

View file

@ -1 +0,0 @@
import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));importlib = has_mfs and __import__('importlib.util');has_mfs and __import__('importlib.machinery');m = has_mfs and sys.modules.setdefault('jaraco', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('jaraco', [os.path.dirname(p)])));m = m or sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)

View file

@ -1 +0,0 @@
import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));importlib = has_mfs and __import__('importlib.util');has_mfs and __import__('importlib.machinery');m = has_mfs and sys.modules.setdefault('jaraco', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('jaraco', [os.path.dirname(p)])));m = m or sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)

View file

@ -1 +0,0 @@
import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));importlib = has_mfs and __import__('importlib.util');has_mfs and __import__('importlib.machinery');m = has_mfs and sys.modules.setdefault('jaraco', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('jaraco', [os.path.dirname(p)])));m = m or sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)

View file

@ -1 +0,0 @@
import sys, types, os;has_mfs = sys.version_info > (3, 5);p = os.path.join(sys._getframe(1).f_locals['sitedir'], *('jaraco',));importlib = has_mfs and __import__('importlib.util');has_mfs and __import__('importlib.machinery');m = has_mfs and sys.modules.setdefault('jaraco', importlib.util.module_from_spec(importlib.machinery.PathFinder.find_spec('jaraco', [os.path.dirname(p)])));m = m or sys.modules.setdefault('jaraco', types.ModuleType('jaraco'));mp = (m or []) and m.__dict__.setdefault('__path__',[]);(p not in mp) and mp.append(p)

View file

@ -3,7 +3,7 @@ Routines for obtaining the class names
of an object and its parent classes. of an object and its parent classes.
""" """
from __future__ import unicode_literals from more_itertools import unique_everseen
def all_bases(c): def all_bases(c):
@ -23,11 +23,12 @@ def all_classes(c):
""" """
return c.mro() return c.mro()
# borrowed from # borrowed from
# http://code.activestate.com/recipes/576949-find-all-subclasses-of-a-given-class/ # http://code.activestate.com/recipes/576949-find-all-subclasses-of-a-given-class/
def iter_subclasses(cls, _seen=None): def iter_subclasses(cls):
""" """
Generator over all subclasses of a given class, in depth-first order. Generator over all subclasses of a given class, in depth-first order.
@ -45,7 +46,7 @@ def iter_subclasses(cls, _seen=None):
D D
E E
C C
>>> # get ALL (new-style) classes currently defined >>> # get ALL classes currently defined
>>> res = [cls.__name__ for cls in iter_subclasses(object)] >>> res = [cls.__name__ for cls in iter_subclasses(object)]
>>> 'type' in res >>> 'type' in res
True True
@ -54,22 +55,14 @@ def iter_subclasses(cls, _seen=None):
>>> len(res) > 100 >>> len(res) > 100
True True
""" """
return unique_everseen(_iter_all_subclasses(cls))
if not isinstance(cls, type):
raise TypeError( def _iter_all_subclasses(cls):
'iter_subclasses must be called with '
'new-style classes, not %.100r' % cls
)
if _seen is None:
_seen = set()
try: try:
subs = cls.__subclasses__() subs = cls.__subclasses__()
except TypeError: # fails only when cls is type except TypeError: # fails only when cls is type
subs = cls.__subclasses__(cls) subs = cls.__subclasses__(cls)
for sub in subs: for sub in subs:
if sub in _seen:
continue
_seen.add(sub)
yield sub
for sub in iter_subclasses(sub, _seen):
yield sub yield sub
yield from iter_subclasses(sub)

View file

@ -4,16 +4,27 @@ meta.py
Some useful metaclasses. Some useful metaclasses.
""" """
from __future__ import unicode_literals
class LeafClassesMeta(type): class LeafClassesMeta(type):
""" """
A metaclass for classes that keeps track of all of them that A metaclass for classes that keeps track of all of them that
aren't base classes. aren't base classes.
"""
_leaf_classes = set() >>> Parent = LeafClassesMeta('MyParentClass', (), {})
>>> Parent in Parent._leaf_classes
True
>>> Child = LeafClassesMeta('MyChildClass', (Parent,), {})
>>> Child in Parent._leaf_classes
True
>>> Parent in Parent._leaf_classes
False
>>> Other = LeafClassesMeta('OtherClass', (), {})
>>> Parent in Other._leaf_classes
False
>>> len(Other._leaf_classes)
1
"""
def __init__(cls, name, bases, attrs): def __init__(cls, name, bases, attrs):
if not hasattr(cls, '_leaf_classes'): if not hasattr(cls, '_leaf_classes'):
@ -28,7 +39,21 @@ class TagRegistered(type):
""" """
As classes of this metaclass are created, they keep a registry in the As classes of this metaclass are created, they keep a registry in the
base class of all classes by a class attribute, indicated by attr_name. base class of all classes by a class attribute, indicated by attr_name.
>>> FooObject = TagRegistered('FooObject', (), dict(tag='foo'))
>>> FooObject._registry['foo'] is FooObject
True
>>> BarObject = TagRegistered('Barobject', (FooObject,), dict(tag='bar'))
>>> FooObject._registry is BarObject._registry
True
>>> len(FooObject._registry)
2
'...' below should be 'jaraco.classes' but for pytest-dev/pytest#3396
>>> FooObject._registry['bar']
<class '....meta.Barobject'>
""" """
attr_name = 'tag' attr_name = 'tag'
def __init__(cls, name, bases, namespace): def __init__(cls, name, bases, namespace):

View file

@ -1,10 +1,3 @@
from __future__ import unicode_literals
import six
__metaclass__ = type
class NonDataProperty: class NonDataProperty:
"""Much like the property builtin, but only implements __get__, """Much like the property builtin, but only implements __get__,
making it a non-data property, and can be subsequently reset. making it a non-data property, and can be subsequently reset.
@ -22,11 +15,15 @@ class NonDataProperty:
>>> x.foo = 4 >>> x.foo = 4
>>> x.foo >>> x.foo
4 4
'...' below should be 'jaraco.classes' but for pytest-dev/pytest#3396
>>> X.foo
<....properties.NonDataProperty object at ...>
""" """
def __init__(self, fget): def __init__(self, fget):
assert fget is not None, "fget cannot be none" assert fget is not None, "fget cannot be none"
assert six.callable(fget), "fget must be callable" assert callable(fget), "fget must be callable"
self.fget = fget self.fget = fget
def __get__(self, obj, objtype=None): def __get__(self, obj, objtype=None):
@ -35,33 +32,139 @@ class NonDataProperty:
return self.fget(obj) return self.fget(obj)
# from http://stackoverflow.com/a/5191224 class classproperty:
class ClassPropertyDescriptor: """
Like @property but applies at the class level.
>>> class X(metaclass=classproperty.Meta):
... val = None
... @classproperty
... def foo(cls):
... return cls.val
... @foo.setter
... def foo(cls, val):
... cls.val = val
>>> X.foo
>>> X.foo = 3
>>> X.foo
3
>>> x = X()
>>> x.foo
3
>>> X.foo = 4
>>> x.foo
4
Setting the property on an instance affects the class.
>>> x.foo = 5
>>> x.foo
5
>>> X.foo
5
>>> vars(x)
{}
>>> X().foo
5
Attempting to set an attribute where no setter was defined
results in an AttributeError:
>>> class GetOnly(metaclass=classproperty.Meta):
... @classproperty
... def foo(cls):
... return 'bar'
>>> GetOnly.foo = 3
Traceback (most recent call last):
...
AttributeError: can't set attribute
It is also possible to wrap a classmethod or staticmethod in
a classproperty.
>>> class Static(metaclass=classproperty.Meta):
... @classproperty
... @classmethod
... def foo(cls):
... return 'foo'
... @classproperty
... @staticmethod
... def bar():
... return 'bar'
>>> Static.foo
'foo'
>>> Static.bar
'bar'
*Legacy*
For compatibility, if the metaclass isn't specified, the
legacy behavior will be invoked.
>>> class X:
... val = None
... @classproperty
... def foo(cls):
... return cls.val
... @foo.setter
... def foo(cls, val):
... cls.val = val
>>> X.foo
>>> X.foo = 3
>>> X.foo
3
>>> x = X()
>>> x.foo
3
>>> X.foo = 4
>>> x.foo
4
Note, because the metaclass was not specified, setting
a value on an instance does not have the intended effect.
>>> x.foo = 5
>>> x.foo
5
>>> X.foo # should be 5
4
>>> vars(x) # should be empty
{'foo': 5}
>>> X().foo # should be 5
4
"""
class Meta(type):
def __setattr__(self, key, value):
obj = self.__dict__.get(key, None)
if type(obj) is classproperty:
return obj.__set__(self, value)
return super().__setattr__(key, value)
def __init__(self, fget, fset=None): def __init__(self, fget, fset=None):
self.fget = fget self.fget = self._ensure_method(fget)
self.fset = fset self.fset = fset
fset and self.setter(fset)
def __get__(self, obj, klass=None): def __get__(self, instance, owner=None):
if klass is None: return self.fget.__get__(None, owner)()
klass = type(obj)
return self.fget.__get__(obj, klass)()
def __set__(self, obj, value): def __set__(self, owner, value):
if not self.fset: if not self.fset:
raise AttributeError("can't set attribute") raise AttributeError("can't set attribute")
type_ = type(obj) if type(owner) is not classproperty.Meta:
return self.fset.__get__(obj, type_)(value) owner = type(owner)
return self.fset.__get__(None, owner)(value)
def setter(self, func): def setter(self, fset):
if not isinstance(func, (classmethod, staticmethod)): self.fset = self._ensure_method(fset)
func = classmethod(func)
self.fset = func
return self return self
@classmethod
def classproperty(func): def _ensure_method(cls, fn):
if not isinstance(func, (classmethod, staticmethod)): """
func = classmethod(func) Ensure fn is a classmethod or staticmethod.
"""
return ClassPropertyDescriptor(func) needs_method = not isinstance(fn, (classmethod, staticmethod))
return classmethod(fn) if needs_method else fn

View file

@ -1,21 +1,11 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals, division
import re import re
import operator import operator
import collections import collections.abc
import itertools import itertools
import copy import copy
import functools import functools
import random
try:
import collections.abc
except ImportError:
# Python 2.7
collections.abc = collections
import six
from jaraco.classes.properties import NonDataProperty from jaraco.classes.properties import NonDataProperty
import jaraco.text import jaraco.text
@ -34,6 +24,14 @@ class Projection(collections.abc.Mapping):
>>> sorted(list(prj.keys())) >>> sorted(list(prj.keys()))
['a', 'c'] ['a', 'c']
Attempting to access a key not in the projection
results in a KeyError.
>>> prj['b']
Traceback (most recent call last):
...
KeyError: 'b'
Use the projection to update another dict. Use the projection to update another dict.
>>> target = {'a': 2, 'b': 2} >>> target = {'a': 2, 'b': 2}
@ -48,6 +46,7 @@ class Projection(collections.abc.Mapping):
>>> dict(prj) >>> dict(prj)
{'c': 3} {'c': 3}
""" """
def __init__(self, keys, space): def __init__(self, keys, space):
self._keys = tuple(keys) self._keys = tuple(keys)
self._space = space self._space = space
@ -64,7 +63,7 @@ class Projection(collections.abc.Mapping):
return len(tuple(iter(self))) return len(tuple(iter(self)))
class DictFilter(object): class DictFilter(collections.abc.Mapping):
""" """
Takes a dict, and simulates a sub-dict based on the keys. Takes a dict, and simulates a sub-dict based on the keys.
@ -72,6 +71,10 @@ class DictFilter(object):
>>> filtered = DictFilter(sample, ['a', 'c']) >>> filtered = DictFilter(sample, ['a', 'c'])
>>> filtered == {'a': 1, 'c': 3} >>> filtered == {'a': 1, 'c': 3}
True True
>>> set(filtered.values()) == {1, 3}
True
>>> set(filtered.items()) == {('a', 1), ('c', 3)}
True
One can also filter by a regular expression pattern One can also filter by a regular expression pattern
@ -84,15 +87,28 @@ class DictFilter(object):
>>> filtered == {'a': 1, 'b': 2, 'c': 3, 'd': 4} >>> filtered == {'a': 1, 'b': 2, 'c': 3, 'd': 4}
True True
>>> filtered['e']
Traceback (most recent call last):
...
KeyError: 'e'
>>> 'e' in filtered
False
Pattern is useful for excluding keys with a prefix.
>>> filtered = DictFilter(sample, include_pattern=r'(?![ace])')
>>> dict(filtered)
{'b': 2, 'd': 4}
Also note that DictFilter keeps a reference to the original dict, so Also note that DictFilter keeps a reference to the original dict, so
if you modify the original dict, that could modify the filtered dict. if you modify the original dict, that could modify the filtered dict.
>>> del sample['d'] >>> del sample['d']
>>> del sample['a'] >>> dict(filtered)
>>> filtered == {'b': 2, 'c': 3} {'b': 2}
True
""" """
def __init__(self, dict, include_keys=[], include_pattern=None): def __init__(self, dict, include_keys=[], include_pattern=None):
self.dict = dict self.dict = dict
self.specified_keys = set(include_keys) self.specified_keys = set(include_keys)
@ -105,35 +121,23 @@ class DictFilter(object):
def get_pattern_keys(self): def get_pattern_keys(self):
keys = filter(self.include_pattern.match, self.dict.keys()) keys = filter(self.include_pattern.match, self.dict.keys())
return set(keys) return set(keys)
pattern_keys = NonDataProperty(get_pattern_keys) pattern_keys = NonDataProperty(get_pattern_keys)
@property @property
def include_keys(self): def include_keys(self):
return self.specified_keys.union(self.pattern_keys) return self.specified_keys | self.pattern_keys
def keys(self):
return self.include_keys.intersection(self.dict.keys())
def values(self):
keys = self.keys()
values = map(self.dict.get, keys)
return values
def __getitem__(self, i): def __getitem__(self, i):
if i not in self.include_keys: if i not in self.include_keys:
return KeyError, i raise KeyError(i)
return self.dict[i] return self.dict[i]
def items(self): def __iter__(self):
keys = self.keys() return filter(self.include_keys.__contains__, self.dict.keys())
values = map(self.dict.get, keys)
return zip(keys, values)
def __eq__(self, other): def __len__(self):
return dict(self) == other return len(list(self))
def __ne__(self, other):
return dict(self) != other
def dict_map(function, dictionary): def dict_map(function, dictionary):
@ -158,7 +162,7 @@ class RangeMap(dict):
the sorted list of keys. the sorted list of keys.
One may supply keyword parameters to be passed to the sort function used One may supply keyword parameters to be passed to the sort function used
to sort keys (i.e. cmp [python 2 only], keys, reverse) as sort_params. to sort keys (i.e. key, reverse) as sort_params.
Let's create a map that maps 1-3 -> 'a', 4-6 -> 'b' Let's create a map that maps 1-3 -> 'a', 4-6 -> 'b'
@ -211,12 +215,36 @@ class RangeMap(dict):
>>> r.get(7, 'not found') >>> r.get(7, 'not found')
'not found' 'not found'
One often wishes to define the ranges by their left-most values,
which requires use of sort params and a key_match_comparator.
>>> r = RangeMap({1: 'a', 4: 'b'},
... sort_params=dict(reverse=True),
... key_match_comparator=operator.ge)
>>> r[1], r[2], r[3], r[4], r[5], r[6]
('a', 'a', 'a', 'b', 'b', 'b')
That wasn't nearly as easy as before, so an alternate constructor
is provided:
>>> r = RangeMap.left({1: 'a', 4: 'b', 7: RangeMap.undefined_value})
>>> r[1], r[2], r[3], r[4], r[5], r[6]
('a', 'a', 'a', 'b', 'b', 'b')
""" """
def __init__(self, source, sort_params={}, key_match_comparator=operator.le): def __init__(self, source, sort_params={}, key_match_comparator=operator.le):
dict.__init__(self, source) dict.__init__(self, source)
self.sort_params = sort_params self.sort_params = sort_params
self.match = key_match_comparator self.match = key_match_comparator
@classmethod
def left(cls, source):
return cls(
source, sort_params=dict(reverse=True), key_match_comparator=operator.ge
)
def __getitem__(self, item): def __getitem__(self, item):
sorted_keys = sorted(self.keys(), **self.sort_params) sorted_keys = sorted(self.keys(), **self.sort_params)
if isinstance(item, RangeMap.Item): if isinstance(item, RangeMap.Item):
@ -248,16 +276,14 @@ class RangeMap(dict):
def bounds(self): def bounds(self):
sorted_keys = sorted(self.keys(), **self.sort_params) sorted_keys = sorted(self.keys(), **self.sort_params)
return ( return (sorted_keys[RangeMap.first_item], sorted_keys[RangeMap.last_item])
sorted_keys[RangeMap.first_item],
sorted_keys[RangeMap.last_item],
)
# some special values for the RangeMap # some special values for the RangeMap
undefined_value = type(str('RangeValueUndefined'), (object,), {})() undefined_value = type(str('RangeValueUndefined'), (), {})()
class Item(int): class Item(int):
"RangeMap Item" "RangeMap Item"
first_item = Item(0) first_item = Item(0)
last_item = Item(-1) last_item = Item(-1)
@ -284,6 +310,7 @@ def sorted_items(d, key=__identity, reverse=False):
# wrap the key func so it operates on the first element of each item # wrap the key func so it operates on the first element of each item
def pairkey_key(item): def pairkey_key(item):
return key(item[0]) return key(item[0])
return sorted(d.items(), key=pairkey_key, reverse=reverse) return sorted(d.items(), key=pairkey_key, reverse=reverse)
@ -292,8 +319,9 @@ class KeyTransformingDict(dict):
A dict subclass that transforms the keys before they're used. A dict subclass that transforms the keys before they're used.
Subclasses may override the default transform_key to customize behavior. Subclasses may override the default transform_key to customize behavior.
""" """
@staticmethod @staticmethod
def transform_key(key): def transform_key(key): # pragma: nocover
return key return key
def __init__(self, *args, **kargs): def __init__(self, *args, **kargs):
@ -360,7 +388,7 @@ class FoldedCaseKeyedDict(KeyTransformingDict):
True True
>>> 'HELLO' in d >>> 'HELLO' in d
True True
>>> print(repr(FoldedCaseKeyedDict({'heLlo': 'world'})).replace("u'", "'")) >>> print(repr(FoldedCaseKeyedDict({'heLlo': 'world'})))
{'heLlo': 'world'} {'heLlo': 'world'}
>>> d = FoldedCaseKeyedDict({'heLlo': 'world'}) >>> d = FoldedCaseKeyedDict({'heLlo': 'world'})
>>> print(d['hello']) >>> print(d['hello'])
@ -411,13 +439,19 @@ class FoldedCaseKeyedDict(KeyTransformingDict):
>>> print(d.matching_key_for('this')) >>> print(d.matching_key_for('this'))
This This
>>> d.matching_key_for('missing')
Traceback (most recent call last):
...
KeyError: 'missing'
""" """
@staticmethod @staticmethod
def transform_key(key): def transform_key(key):
return jaraco.text.FoldedCase(key) return jaraco.text.FoldedCase(key)
class DictAdapter(object): class DictAdapter:
""" """
Provide a getitem interface for attributes of an object. Provide a getitem interface for attributes of an object.
@ -428,6 +462,7 @@ class DictAdapter(object):
>>> print("lowercase is %(ascii_lowercase)s" % DictAdapter(string)) >>> print("lowercase is %(ascii_lowercase)s" % DictAdapter(string))
lowercase is abcdefghijklmnopqrstuvwxyz lowercase is abcdefghijklmnopqrstuvwxyz
""" """
def __init__(self, wrapped_ob): def __init__(self, wrapped_ob):
self.object = wrapped_ob self.object = wrapped_ob
@ -435,7 +470,7 @@ class DictAdapter(object):
return getattr(self.object, name) return getattr(self.object, name)
class ItemsAsAttributes(object): class ItemsAsAttributes:
""" """
Mix-in class to enable a mapping object to provide items as Mix-in class to enable a mapping object to provide items as
attributes. attributes.
@ -479,6 +514,7 @@ class ItemsAsAttributes(object):
>>> i.foo >>> i.foo
'missing item' 'missing item'
""" """
def __getattr__(self, key): def __getattr__(self, key):
try: try:
return getattr(super(ItemsAsAttributes, self), key) return getattr(super(ItemsAsAttributes, self), key)
@ -492,14 +528,15 @@ class ItemsAsAttributes(object):
return cont[key] return cont[key]
except KeyError: except KeyError:
return missing_result return missing_result
result = _safe_getitem(self, key, noval) result = _safe_getitem(self, key, noval)
if result is not noval: if result is not noval:
return result return result
# raise the original exception, but use the original class # raise the original exception, but use the original class
# name, not 'super'. # name, not 'super'.
message, = e.args (message,) = e.args
message = message.replace('super', self.__class__.__name__, 1) message = message.replace('super', self.__class__.__name__, 1)
e.args = message, e.args = (message,)
raise raise
@ -542,7 +579,7 @@ class IdentityOverrideMap(dict):
return key return key
class DictStack(list, collections.abc.Mapping): class DictStack(list, collections.abc.MutableMapping):
""" """
A stack of dictionaries that behaves as a view on those dictionaries, A stack of dictionaries that behaves as a view on those dictionaries,
giving preference to the last. giving preference to the last.
@ -554,11 +591,18 @@ class DictStack(list, collections.abc.Mapping):
2 2
>>> stack['c'] >>> stack['c']
2 2
>>> len(stack)
3
>>> stack.push(dict(a=3)) >>> stack.push(dict(a=3))
>>> stack['a'] >>> stack['a']
3 3
>>> stack['a'] = 4
>>> set(stack.keys()) == set(['a', 'b', 'c']) >>> set(stack.keys()) == set(['a', 'b', 'c'])
True True
>>> set(stack.items()) == set([('a', 4), ('b', 2), ('c', 2)])
True
>>> dict(**stack) == dict(stack) == dict(a=4, c=2, b=2)
True
>>> d = stack.pop() >>> d = stack.pop()
>>> stack['a'] >>> stack['a']
2 2
@ -566,19 +610,43 @@ class DictStack(list, collections.abc.Mapping):
>>> stack['a'] >>> stack['a']
1 1
>>> stack.get('b', None) >>> stack.get('b', None)
>>> 'c' in stack
True
>>> del stack['c']
>>> dict(stack)
{'a': 1}
""" """
def keys(self): def __iter__(self):
return list(set(itertools.chain.from_iterable(c.keys() for c in self))) dicts = list.__iter__(self)
return iter(set(itertools.chain.from_iterable(c.keys() for c in dicts)))
def __getitem__(self, key): def __getitem__(self, key):
for scope in reversed(self): for scope in reversed(tuple(list.__iter__(self))):
if key in scope: if key in scope:
return scope[key] return scope[key]
raise KeyError(key) raise KeyError(key)
push = list.append push = list.append
def __contains__(self, other):
return collections.abc.Mapping.__contains__(self, other)
def __len__(self):
return len(list(iter(self)))
def __setitem__(self, key, item):
last = list.__getitem__(self, -1)
return last.__setitem__(key, item)
def __delitem__(self, key):
last = list.__getitem__(self, -1)
return last.__delitem__(key)
# workaround for mypy confusion
def pop(self, *args, **kwargs):
return list.pop(self, *args, **kwargs)
class BijectiveMap(dict): class BijectiveMap(dict):
""" """
@ -641,6 +709,7 @@ class BijectiveMap(dict):
>>> 'a' in m >>> 'a' in m
False False
""" """
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
super(BijectiveMap, self).__init__() super(BijectiveMap, self).__init__()
self.update(*args, **kwargs) self.update(*args, **kwargs)
@ -649,9 +718,10 @@ class BijectiveMap(dict):
if item == value: if item == value:
raise ValueError("Key cannot map to itself") raise ValueError("Key cannot map to itself")
overlap = ( overlap = (
item in self and self[item] != value item in self
or and self[item] != value
value in self and self[value] != item or value in self
and self[value] != item
) )
if overlap: if overlap:
raise ValueError("Key/Value pairs may not overlap") raise ValueError("Key/Value pairs may not overlap")
@ -690,6 +760,16 @@ class FrozenDict(collections.abc.Mapping, collections.abc.Hashable):
True True
>>> dict(a=1, b=2) == a >>> dict(a=1, b=2) == a
True True
>>> 'a' in a
True
>>> type(hash(a)) is type(0)
True
>>> set(iter(a)) == {'a', 'b'}
True
>>> len(a)
2
>>> a['a'] == a.get('a') == 1
True
>>> a['c'] = 3 >>> a['c'] = 3
Traceback (most recent call last): Traceback (most recent call last):
@ -719,6 +799,7 @@ class FrozenDict(collections.abc.Mapping, collections.abc.Hashable):
>>> a.copy() is not a >>> a.copy() is not a
True True
""" """
__slots__ = ['__data'] __slots__ = ['__data']
def __new__(cls, *args, **kwargs): def __new__(cls, *args, **kwargs):
@ -732,7 +813,7 @@ class FrozenDict(collections.abc.Mapping, collections.abc.Hashable):
# Hashable # Hashable
def __hash__(self): def __hash__(self):
return hash(tuple(sorted(self.__data.iteritems()))) return hash(tuple(sorted(self.__data.items())))
# Mapping # Mapping
def __iter__(self): def __iter__(self):
@ -791,8 +872,9 @@ class Enumeration(ItemsAsAttributes, BijectiveMap):
>>> e[3] >>> e[3]
'c' 'c'
""" """
def __init__(self, names, codes=None): def __init__(self, names, codes=None):
if isinstance(names, six.string_types): if isinstance(names, str):
names = names.split() names = names.split()
if codes is None: if codes is None:
codes = itertools.count() codes = itertools.count()
@ -800,14 +882,14 @@ class Enumeration(ItemsAsAttributes, BijectiveMap):
@property @property
def names(self): def names(self):
return (key for key in self if isinstance(key, six.string_types)) return (key for key in self if isinstance(key, str))
@property @property
def codes(self): def codes(self):
return (self[name] for name in self.names) return (self[name] for name in self.names)
class Everything(object): class Everything:
""" """
A collection "containing" every possible thing. A collection "containing" every possible thing.
@ -821,11 +903,12 @@ class Everything(object):
>>> random.choice([None, 'foo', 42, ('a', 'b', 'c')]) in Everything() >>> random.choice([None, 'foo', 42, ('a', 'b', 'c')]) in Everything()
True True
""" """
def __contains__(self, other): def __contains__(self, other):
return True return True
class InstrumentedDict(six.moves.UserDict): class InstrumentedDict(collections.UserDict): # type: ignore # buggy mypy
""" """
Instrument an existing dictionary with additional Instrument an existing dictionary with additional
functionality, but always reference and mutate functionality, but always reference and mutate
@ -841,12 +924,13 @@ class InstrumentedDict(six.moves.UserDict):
>>> inst.keys() == orig.keys() >>> inst.keys() == orig.keys()
True True
""" """
def __init__(self, data): def __init__(self, data):
six.moves.UserDict.__init__(self) super().__init__()
self.data = data self.data = data
class Least(object): class Least:
""" """
A value that is always lesser than any other A value that is always lesser than any other
@ -869,14 +953,16 @@ class Least(object):
def __le__(self, other): def __le__(self, other):
return True return True
__lt__ = __le__ __lt__ = __le__
def __ge__(self, other): def __ge__(self, other):
return False return False
__gt__ = __ge__ __gt__ = __ge__
class Greatest(object): class Greatest:
""" """
A value that is always greater than any other A value that is always greater than any other
@ -899,8 +985,106 @@ class Greatest(object):
def __ge__(self, other): def __ge__(self, other):
return True return True
__gt__ = __ge__ __gt__ = __ge__
def __le__(self, other): def __le__(self, other):
return False return False
__lt__ = __le__ __lt__ = __le__
def pop_all(items):
    """
    Clear items in place and return a copy of items.

    >>> items = [1, 2, 3]
    >>> popped = pop_all(items)
    >>> popped is items
    False
    >>> popped
    [1, 2, 3]
    >>> items
    []
    """
    # Snapshot first, then empty the original in place so existing
    # references to ``items`` observe the cleared list.
    snapshot = list(items)
    del items[:]
    return snapshot
# mypy disabled for pytest-dev/pytest#8332
class FreezableDefaultDict(collections.defaultdict):  # type: ignore
    """
    Often it is desirable to prevent the mutation of
    a default dict after its initial construction, such
    as to prevent mutation during iteration.

    >>> dd = FreezableDefaultDict(list)
    >>> dd[0].append('1')
    >>> dd.freeze()
    >>> dd[1]
    []
    >>> len(dd)
    1
    """

    def __missing__(self, key):
        # Once frozen, serve a fresh default without storing it;
        # before that, defer to defaultdict's insert-on-miss behavior.
        handler = getattr(self, '_frozen', None)
        if handler is not None:
            return handler(key)
        return super().__missing__(key)

    def freeze(self):
        # Installing ``_frozen`` flips __missing__ into read-only mode.
        self._frozen = lambda key: self.default_factory()
class Accumulator:
    """
    A callable running total.

    Each call adds ``val`` to the stored total and returns the new
    total.  Addition uses ``+=``, so a mutable ``initial`` value
    (e.g. a list) is updated in place.
    """

    def __init__(self, initial=0):
        # current running total
        self.val = initial

    def __call__(self, val):
        self.val += val
        return self.val
class WeightedLookup(RangeMap):
    """
    Given parameters suitable for a dict representing keys
    and a weighted proportion, return a RangeMap representing
    spans of values proportional to the weights:

    >>> even = WeightedLookup(a=1, b=1)

    [0, 1) -> a
    [1, 2) -> b

    >>> lk = WeightedLookup(a=1, b=2)

    [0, 1) -> a
    [1, 3) -> b

    >>> lk[.5]
    'a'
    >>> lk[1.5]
    'b'

    Adds ``.random()`` to select a random weighted value:

    >>> lk.random() in ['a', 'b']
    True

    >>> choices = [lk.random() for x in range(1000)]

    Statistically speaking, choices should be .5 a:b

    >>> ratio = choices.count('a') / choices.count('b')
    >>> .4 < ratio < .6
    True
    """

    def __init__(self, *args, **kwargs):
        weights = dict(*args, **kwargs)
        # Cumulative upper bound for each key, proportional to its weight;
        # ``operator.lt`` makes each span half-open: [lower, upper).
        cumulative = map(Accumulator(), weights.values())
        super().__init__(
            zip(cumulative, weights.keys()),
            key_match_comparator=operator.lt,
        )

    def random(self):
        # A uniform draw over [0, upper) lands in a span with
        # probability proportional to that key's weight.
        _, upper = self.bounds()
        return self[random.random() * upper]

253
libs/win/jaraco/context.py Normal file
View file

@ -0,0 +1,253 @@
import os
import subprocess
import contextlib
import functools
import tempfile
import shutil
import operator
@contextlib.contextmanager
def pushd(dir):
    """
    Change the working directory to *dir* for the duration of the
    context, yielding *dir*, then restore the previous directory.
    """
    saved = os.getcwd()
    try:
        os.chdir(dir)
        yield dir
    finally:
        # Restore even if the body (or chdir itself) raised.
        os.chdir(saved)
@contextlib.contextmanager
def tarball_context(url, target_dir=None, runner=None, pushd=pushd):
    """
    Get a tarball, extract it, change to that directory, yield, then
    clean up.

    `runner` is the function to invoke commands.
    `pushd` is a context manager for changing the directory.
    """
    if target_dir is None:
        target_dir = os.path.basename(url).replace('.tar.gz', '').replace('.tgz', '')
    if runner is None:
        runner = functools.partial(subprocess.check_call, shell=True)
    runner('mkdir {target_dir}'.format(target_dir=target_dir))
    try:
        # --strip-components=1 drops the archive's leading path and -C
        # extracts into {target_dir}, so the final location is predictable.
        download_and_extract = (
            'wget {url} -O - | '
            'tar x{compression} --strip-components=1 -C {target_dir}'
        ).format(
            url=url,
            compression=infer_compression(url),
            target_dir=target_dir,
        )
        runner(download_and_extract)
        with pushd(target_dir):
            yield target_dir
    finally:
        runner('rm -Rf {target_dir}'.format(target_dir=target_dir))
def infer_compression(url):
    """
    Given a URL or filename, infer the compression code for tar.
    """
    # cheat and just assume it's the last two characters
    suffix_map = {'gz': 'z', 'bz': 'j', 'xz': 'J'}
    # Assume 'z' (gzip) if no match
    return suffix_map.get(url[-2:], 'z')
@contextlib.contextmanager
def temp_dir(remover=shutil.rmtree):
    """
    Create a temporary directory context. Pass a custom remover
    to override the removal behavior.
    """
    location = tempfile.mkdtemp()
    try:
        yield location
    finally:
        # ``remover`` runs whether or not the body raised.
        remover(location)
@contextlib.contextmanager
def repo_context(url, branch=None, quiet=True, dest_ctx=temp_dir):
    """
    Check out the repo indicated by url.

    If dest_ctx is supplied, it should be a context manager
    to yield the target directory for the check out.

    The scm executable (git or hg) is inferred from the url.
    """
    exe = 'git' if 'git' in url else 'hg'
    with dest_ctx() as repo_dir:
        cmd = [exe, 'clone', url, repo_dir]
        if branch:
            cmd.extend(['--branch', branch])
        # subprocess.DEVNULL replaces open(os.path.devnull, 'w'), which
        # previously leaked an open file handle on every call.
        stdout = subprocess.DEVNULL if quiet else None
        subprocess.check_call(cmd, stdout=stdout)
        yield repo_dir
@contextlib.contextmanager
def null():
    """
    A no-op context manager: yields nothing and suppresses nothing.
    """
    yield
class ExceptionTrap:
    """
    A context manager that will catch certain exceptions and provide an
    indication they occurred.

    >>> with ExceptionTrap() as trap:
    ...     raise Exception()
    >>> bool(trap)
    True

    >>> with ExceptionTrap() as trap:
    ...     pass
    >>> bool(trap)
    False

    >>> with ExceptionTrap(ValueError) as trap:
    ...     raise ValueError("1 + 1 is not 3")
    >>> bool(trap)
    True

    >>> with ExceptionTrap(ValueError) as trap:
    ...     raise Exception()
    Traceback (most recent call last):
    ...
    Exception

    >>> bool(trap)
    False
    """

    # class-level default: "no exception seen yet"
    exc_info = None, None, None

    def __init__(self, exceptions=(Exception,)):
        self.exceptions = exceptions

    def __enter__(self):
        return self

    @property
    def type(self):
        return self.exc_info[0]

    @property
    def value(self):
        return self.exc_info[1]

    @property
    def tb(self):
        return self.exc_info[2]

    def __exit__(self, *exc_info):
        exc_type = exc_info[0]
        caught = exc_type is not None and issubclass(exc_type, self.exceptions)
        if caught:
            # Record the trapped exception; the truthy return suppresses it.
            self.exc_info = exc_info
        return caught

    def __bool__(self):
        return bool(self.type)

    def raises(self, func, *, _test=bool):
        """
        Wrap func and replace the result with the truth
        value of the trap (True if an exception occurred).

        First, give the decorator an alias to support Python 3.8
        Syntax.

        >>> raises = ExceptionTrap(ValueError).raises

        Now decorate a function that always fails.

        >>> @raises
        ... def fail():
        ...     raise ValueError('failed')
        >>> fail()
        True
        """

        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            with ExceptionTrap(self.exceptions) as trap:
                func(*args, **kwargs)
            return _test(trap)

        return wrapper

    def passes(self, func):
        """
        Wrap func and replace the result with the truth
        value of the trap (True if no exception).

        First, give the decorator an alias to support Python 3.8
        Syntax.

        >>> passes = ExceptionTrap(ValueError).passes

        Now decorate a function that always fails.

        >>> @passes
        ... def fail():
        ...     raise ValueError('failed')
        >>> fail()
        False
        """
        # ``passes`` is just ``raises`` with the truth test inverted.
        return self.raises(func, _test=operator.not_)
class suppress(contextlib.suppress, contextlib.ContextDecorator):
    """
    A version of contextlib.suppress with decorator support.

    >>> @suppress(KeyError)
    ... def key_error():
    ...     {}['']
    >>> key_error()
    """

    # No body needed: ``contextlib.suppress`` supplies the
    # exception-swallowing behavior and ``ContextDecorator`` supplies
    # the ability to use an instance as a function decorator.
class on_interrupt(contextlib.ContextDecorator):
    """
    Replace a KeyboardInterrupt with SystemExit(1)

    >>> def do_interrupt():
    ...     raise KeyboardInterrupt()
    >>> on_interrupt('error')(do_interrupt)()
    Traceback (most recent call last):
    ...
    SystemExit: 1
    >>> on_interrupt('error', code=255)(do_interrupt)()
    Traceback (most recent call last):
    ...
    SystemExit: 255
    >>> on_interrupt('suppress')(do_interrupt)()
    >>> with __import__('pytest').raises(KeyboardInterrupt):
    ...     on_interrupt('ignore')(do_interrupt)()
    """

    def __init__(
        self,
        action='error',
        # py3.7 compat
        # /,
        code=1,
    ):
        self.action = action
        self.code = code

    def __enter__(self):
        return self

    def __exit__(self, exctype, excinst, exctb):
        # Only KeyboardInterrupt is handled; everything else propagates.
        if exctype is not KeyboardInterrupt:
            return False
        if self.action == 'ignore':
            return False
        if self.action == 'error':
            raise SystemExit(self.code) from excinst
        # 'suppress' swallows the interrupt; any other action propagates.
        return self.action == 'suppress'

View file

@ -1,30 +1,16 @@
from __future__ import (
absolute_import, unicode_literals, print_function, division,
)
import functools import functools
import time import time
import warnings
import inspect import inspect
import collections import collections
from itertools import count import types
import itertools
__metaclass__ = type import more_itertools
from typing import Callable, TypeVar
try: CallableT = TypeVar("CallableT", bound=Callable[..., object])
from functools import lru_cache
except ImportError:
try:
from backports.functools_lru_cache import lru_cache
except ImportError:
try:
from functools32 import lru_cache
except ImportError:
warnings.warn("No lru_cache available")
import more_itertools.recipes
def compose(*funcs): def compose(*funcs):
@ -32,9 +18,9 @@ def compose(*funcs):
Compose any number of unary functions into a single unary function. Compose any number of unary functions into a single unary function.
>>> import textwrap >>> import textwrap
>>> from six import text_type >>> expected = str.strip(textwrap.dedent(compose.__doc__))
>>> stripped = text_type.strip(textwrap.dedent(compose.__doc__)) >>> strip_and_dedent = compose(str.strip, textwrap.dedent)
>>> compose(text_type.strip, textwrap.dedent)(compose.__doc__) == stripped >>> strip_and_dedent(compose.__doc__) == expected
True True
Compose also allows the innermost function to take arbitrary arguments. Compose also allows the innermost function to take arbitrary arguments.
@ -47,6 +33,7 @@ def compose(*funcs):
def compose_two(f1, f2): def compose_two(f1, f2):
return lambda *args, **kwargs: f1(f2(*args, **kwargs)) return lambda *args, **kwargs: f1(f2(*args, **kwargs))
return functools.reduce(compose_two, funcs) return functools.reduce(compose_two, funcs)
@ -60,9 +47,11 @@ def method_caller(method_name, *args, **kwargs):
>>> lower('MyString') >>> lower('MyString')
'mystring' 'mystring'
""" """
def call_method(target): def call_method(target):
func = getattr(target, method_name) func = getattr(target, method_name)
return func(*args, **kwargs) return func(*args, **kwargs)
return call_method return call_method
@ -97,16 +86,23 @@ def once(func):
>>> add_three(0) >>> add_three(0)
0 0
""" """
@functools.wraps(func) @functools.wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
if not hasattr(wrapper, 'saved_result'): if not hasattr(wrapper, 'saved_result'):
wrapper.saved_result = func(*args, **kwargs) wrapper.saved_result = func(*args, **kwargs)
return wrapper.saved_result return wrapper.saved_result
wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result') wrapper.reset = lambda: vars(wrapper).__delitem__('saved_result')
return wrapper return wrapper
def method_cache(method, cache_wrapper=None): def method_cache(
method: CallableT,
cache_wrapper: Callable[
[CallableT], CallableT
] = functools.lru_cache(), # type: ignore[assignment]
) -> CallableT:
""" """
Wrap lru_cache to support storing the cache data in the object instances. Wrap lru_cache to support storing the cache data in the object instances.
@ -153,9 +149,14 @@ def method_cache(method, cache_wrapper=None):
>>> a.method.cache_clear() >>> a.method.cache_clear()
Same for a method that hasn't yet been called.
>>> c = MyClass()
>>> c.method.cache_clear()
Another cache wrapper may be supplied: Another cache wrapper may be supplied:
>>> cache = lru_cache(maxsize=2) >>> cache = functools.lru_cache(maxsize=2)
>>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache) >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
>>> a = MyClass() >>> a = MyClass()
>>> a.method2() >>> a.method2()
@ -168,16 +169,22 @@ def method_cache(method, cache_wrapper=None):
http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/ http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
for another implementation and additional justification. for another implementation and additional justification.
""" """
cache_wrapper = cache_wrapper or lru_cache()
def wrapper(self, *args, **kwargs): def wrapper(self: object, *args: object, **kwargs: object) -> object:
# it's the first call, replace the method with a cached, bound method # it's the first call, replace the method with a cached, bound method
bound_method = functools.partial(method, self) bound_method: CallableT = types.MethodType( # type: ignore[assignment]
method, self
)
cached_method = cache_wrapper(bound_method) cached_method = cache_wrapper(bound_method)
setattr(self, method.__name__, cached_method) setattr(self, method.__name__, cached_method)
return cached_method(*args, **kwargs) return cached_method(*args, **kwargs)
return _special_method_cache(method, cache_wrapper) or wrapper # Support cache clear even before cache has been created.
wrapper.cache_clear = lambda: None # type: ignore[attr-defined]
return ( # type: ignore[return-value]
_special_method_cache(method, cache_wrapper) or wrapper
)
def _special_method_cache(method, cache_wrapper): def _special_method_cache(method, cache_wrapper):
@ -200,7 +207,7 @@ def _special_method_cache(method, cache_wrapper):
def proxy(self, *args, **kwargs): def proxy(self, *args, **kwargs):
if wrapper_name not in vars(self): if wrapper_name not in vars(self):
bound = functools.partial(method, self) bound = types.MethodType(method, self)
cache = cache_wrapper(bound) cache = cache_wrapper(bound)
setattr(self, wrapper_name, cache) setattr(self, wrapper_name, cache)
else: else:
@ -217,12 +224,17 @@ def apply(transform):
>>> @apply(reversed) >>> @apply(reversed)
... def get_numbers(start): ... def get_numbers(start):
... "doc for get_numbers"
... return range(start, start+3) ... return range(start, start+3)
>>> list(get_numbers(4)) >>> list(get_numbers(4))
[6, 5, 4] [6, 5, 4]
>>> get_numbers.__doc__
'doc for get_numbers'
""" """
def wrap(func): def wrap(func):
return compose(transform, func) return functools.wraps(func)(compose(transform, func))
return wrap return wrap
@ -238,14 +250,19 @@ def result_invoke(action):
... return a + b ... return a + b
>>> x = add_two(2, 3) >>> x = add_two(2, 3)
5 5
>>> x
5
""" """
def wrap(func): def wrap(func):
@functools.wraps(func) @functools.wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
result = func(*args, **kwargs) result = func(*args, **kwargs)
action(result) action(result)
return result return result
return wrapper return wrapper
return wrap return wrap
@ -273,6 +290,7 @@ class Throttler:
""" """
Rate-limit a function (or other callable) Rate-limit a function (or other callable)
""" """
def __init__(self, func, max_rate=float('Inf')): def __init__(self, func, max_rate=float('Inf')):
if isinstance(func, Throttler): if isinstance(func, Throttler):
func = func.func func = func.func
@ -304,9 +322,11 @@ def first_invoke(func1, func2):
any parameters (for its side-effect) and then invoke func2 any parameters (for its side-effect) and then invoke func2
with whatever parameters were passed, returning its result. with whatever parameters were passed, returning its result.
""" """
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
func1() func1()
return func2(*args, **kwargs) return func2(*args, **kwargs)
return wrapper return wrapper
@ -317,7 +337,7 @@ def retry_call(func, cleanup=lambda: None, retries=0, trap=()):
exception. On the final attempt, allow any exceptions exception. On the final attempt, allow any exceptions
to propagate. to propagate.
""" """
attempts = count() if retries == float('inf') else range(retries) attempts = itertools.count() if retries == float('inf') else range(retries)
for attempt in attempts: for attempt in attempts:
try: try:
return func() return func()
@ -341,12 +361,15 @@ def retry(*r_args, **r_kwargs):
>>> my_func.__doc__ >>> my_func.__doc__
'this is my funk' 'this is my funk'
""" """
def decorate(func): def decorate(func):
@functools.wraps(func) @functools.wraps(func)
def wrapper(*f_args, **f_kwargs): def wrapper(*f_args, **f_kwargs):
bound = functools.partial(func, *f_args, **f_kwargs) bound = functools.partial(func, *f_args, **f_kwargs)
return retry_call(bound, *r_args, **r_kwargs) return retry_call(bound, *r_args, **r_kwargs)
return wrapper return wrapper
return decorate return decorate
@ -362,7 +385,7 @@ def print_yielded(func):
None None
""" """
print_all = functools.partial(map, print) print_all = functools.partial(map, print)
print_results = compose(more_itertools.recipes.consume, print_all, func) print_results = compose(more_itertools.consume, print_all, func)
return functools.wraps(func)(print_results) return functools.wraps(func)(print_results)
@ -375,10 +398,12 @@ def pass_none(func):
text text
>>> print_text(None) >>> print_text(None)
""" """
@functools.wraps(func) @functools.wraps(func)
def wrapper(param, *args, **kwargs): def wrapper(param, *args, **kwargs):
if param is not None: if param is not None:
return func(param, *args, **kwargs) return func(param, *args, **kwargs)
return wrapper return wrapper
@ -399,18 +424,18 @@ def assign_params(func, namespace):
>>> assigned() >>> assigned()
Traceback (most recent call last): Traceback (most recent call last):
TypeError: func() ...argument... TypeError: func() ...argument...
It even works on methods:
>>> class Handler:
... def meth(self, arg):
... print(arg)
>>> assign_params(Handler().meth, dict(arg='crystal', foo='clear'))()
crystal
""" """
try:
sig = inspect.signature(func) sig = inspect.signature(func)
params = sig.parameters.keys() params = sig.parameters.keys()
except AttributeError: call_ns = {k: namespace[k] for k in params if k in namespace}
spec = inspect.getargspec(func)
params = spec.args
call_ns = {
k: namespace[k]
for k in params
if k in namespace
}
return functools.partial(func, **call_ns) return functools.partial(func, **call_ns)
@ -456,4 +481,45 @@ def save_method_args(method):
attr = args_and_kwargs(args, kwargs) attr = args_and_kwargs(args, kwargs)
setattr(self, attr_name, attr) setattr(self, attr_name, attr)
return method(self, *args, **kwargs) return method(self, *args, **kwargs)
return wrapper return wrapper
def except_(*exceptions, replace=None, use=None):
    """
    Replace the indicated exceptions, if raised, with the indicated
    literal replacement or evaluated expression (if present).

    >>> safe_int = except_(ValueError)(int)
    >>> safe_int('five')
    >>> safe_int('5')
    5

    Specify a literal replacement with ``replace``.

    >>> safe_int_r = except_(ValueError, replace=0)(int)
    >>> safe_int_r('five')
    0

    Provide an expression to ``use`` to pass through particular parameters.

    >>> safe_int_pt = except_(ValueError, use='args[0]')(int)
    >>> safe_int_pt('five')
    'five'
    """

    def decorate(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except exceptions:
                try:
                    # ``use`` is evaluated with the wrapper's locals in
                    # scope, so the expression may reference ``args`` and
                    # ``kwargs`` (e.g. 'args[0]').  When ``use`` is None,
                    # eval(None) raises TypeError and ``replace`` wins.
                    # NOTE(review): eval of a caller-supplied string —
                    # never pass untrusted input as ``use``.
                    return eval(use)
                except TypeError:
                    return replace

        return wrapper

    return decorate

View file

@ -1,5 +1,3 @@
from __future__ import absolute_import, unicode_literals
import numbers import numbers
from functools import reduce from functools import reduce
@ -12,7 +10,8 @@ def get_bit_values(number, size=32):
True True
>>> get_bit_values(0xDEADBEEF) >>> get_bit_values(0xDEADBEEF)
[1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1] [1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, \
1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1]
You may override the default word size of 32-bits to match your actual You may override the default word size of 32-bits to match your actual
application. application.
@ -23,7 +22,7 @@ def get_bit_values(number, size=32):
>>> get_bit_values(0x3, 4) >>> get_bit_values(0x3, 4)
[0, 0, 1, 1] [0, 0, 1, 1]
""" """
number += 2**size number += 2 ** size
return list(map(int, bin(number)[-size:])) return list(map(int, bin(number)[-size:]))
@ -47,11 +46,14 @@ def coalesce(bits):
>>> coalesce([1,0,1]) >>> coalesce([1,0,1])
5 5
""" """
operation = lambda a, b: (a << 1 | b)
def operation(a, b):
return a << 1 | b
return reduce(operation, bits) return reduce(operation, bits)
class Flags(object): class Flags:
""" """
Subclasses should define _names, a list of flag names beginning Subclasses should define _names, a list of flag names beginning
with the least-significant bit. with the least-significant bit.
@ -70,6 +72,7 @@ class Flags(object):
>>> mf.number >>> mf.number
6 6
""" """
def __init__(self, values): def __init__(self, values):
self._values = list(values) self._values = list(values)
if hasattr(self, '_names'): if hasattr(self, '_names'):
@ -106,17 +109,12 @@ class BitMask(type):
A metaclass to create a bitmask with attributes. Subclass an int and A metaclass to create a bitmask with attributes. Subclass an int and
set this as the metaclass to use. set this as the metaclass to use.
Here's how to create such a class on Python 3: Construct such a class:
class MyBits(int, metaclass=BitMask): >>> class MyBits(int, metaclass=BitMask):
a = 0x1 ... a = 0x1
b = 0x4 ... b = 0x4
c = 0x3 ... c = 0x3
For testing purposes, construct explicitly to support Python 2
>>> ns = dict(a=0x1, b=0x4, c=0x3)
>>> MyBits = BitMask(str('MyBits'), (int,), ns)
>>> b1 = MyBits(3) >>> b1 = MyBits(3)
>>> b1.a, b1.b, b1.c >>> b1.a, b1.b, b1.c
@ -128,10 +126,18 @@ class BitMask(type):
If the instance defines methods, they won't be wrapped in If the instance defines methods, they won't be wrapped in
properties. properties.
>>> ns['get_value'] = classmethod(lambda cls: 'some value') >>> class MyBits(int, metaclass=BitMask):
>>> ns['prop'] = property(lambda self: 'a property') ... a = 0x1
>>> MyBits = BitMask(str('MyBits'), (int,), ns) ... b = 0x4
... c = 0x3
...
... @classmethod
... def get_value(cls):
... return 'some value'
...
... @property
... def prop(cls):
... return 'a property'
>>> MyBits(3).get_value() >>> MyBits(3).get_value()
'some value' 'some value'
>>> MyBits(3).prop >>> MyBits(3).prop
@ -145,7 +151,6 @@ class BitMask(type):
return property(lambda self, value=value: bool(self & value)) return property(lambda self, value=value: bool(self & value))
newattrs = dict( newattrs = dict(
(name, make_property(name, value)) (name, make_property(name, value)) for name, value in attrs.items()
for name, value in attrs.items()
) )
return type.__new__(cls, name, bases, newattrs) return type.__new__(cls, name, bases, newattrs)

View file

@ -1,452 +0,0 @@
from __future__ import absolute_import, unicode_literals, print_function
import sys
import re
import inspect
import itertools
import textwrap
import functools
import six
import jaraco.collections
from jaraco.functools import compose
def substitution(old, new):
    """
    Return a function that will perform a substitution on a string
    """

    def replace_in(text):
        return text.replace(old, new)

    return replace_in
def multi_substitution(*substitutions):
    """
    Take a sequence of pairs specifying substitutions, and create
    a function that performs those substitutions.

    >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
    'baz'
    """
    # compose applies the last function first, so reverse the steps
    # to apply the substitutions in the order given.
    steps = tuple(itertools.starmap(substitution, substitutions))
    return compose(*reversed(steps))
class FoldedCase(six.text_type):
"""
A case insensitive string class; behaves just like str
except compares equal when the only variation is case.
>>> s = FoldedCase('hello world')
>>> s == 'Hello World'
True
>>> 'Hello World' == s
True
>>> s != 'Hello World'
False
>>> s.index('O')
4
>>> s.split('O')
['hell', ' w', 'rld']
>>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
['alpha', 'Beta', 'GAMMA']
Sequence membership is straightforward.
>>> "Hello World" in [s]
True
>>> s in ["Hello World"]
True
You may test for set inclusion, but candidate and elements
must both be folded.
>>> FoldedCase("Hello World") in {s}
True
>>> s in {FoldedCase("Hello World")}
True
String inclusion works as long as the FoldedCase object
is on the right.
>>> "hello" in FoldedCase("Hello World")
True
But not if the FoldedCase object is on the left:
>>> FoldedCase('hello') in 'Hello World'
False
In that case, use in_:
>>> FoldedCase('hello').in_('Hello World')
True
"""
def __lt__(self, other):
return self.lower() < other.lower()
def __gt__(self, other):
return self.lower() > other.lower()
def __eq__(self, other):
return self.lower() == other.lower()
def __ne__(self, other):
return self.lower() != other.lower()
def __hash__(self):
return hash(self.lower())
def __contains__(self, other):
return super(FoldedCase, self).lower().__contains__(other.lower())
def in_(self, other):
"Does self appear in other?"
return self in FoldedCase(other)
# cache lower since it's likely to be called frequently.
@jaraco.functools.method_cache
def lower(self):
return super(FoldedCase, self).lower()
def index(self, sub):
return self.lower().index(sub.lower())
def split(self, splitter=' ', maxsplit=0):
pattern = re.compile(re.escape(splitter), re.I)
return pattern.split(self, maxsplit)
def local_format(string):
"""
format the string using variables in the caller's local namespace.
>>> a = 3
>>> local_format("{a:5}")
' 3'
"""
context = inspect.currentframe().f_back.f_locals
if sys.version_info < (3, 2):
return string.format(**context)
return string.format_map(context)
def global_format(string):
"""
format the string using variables in the caller's global namespace.
>>> a = 3
>>> fmt = "The func name: {global_format.__name__}"
>>> global_format(fmt)
'The func name: global_format'
"""
context = inspect.currentframe().f_back.f_globals
if sys.version_info < (3, 2):
return string.format(**context)
return string.format_map(context)
def namespace_format(string):
"""
Format the string using variable in the caller's scope (locals + globals).
>>> a = 3
>>> fmt = "A is {a} and this func is {namespace_format.__name__}"
>>> namespace_format(fmt)
'A is 3 and this func is namespace_format'
"""
context = jaraco.collections.DictStack()
context.push(inspect.currentframe().f_back.f_globals)
context.push(inspect.currentframe().f_back.f_locals)
if sys.version_info < (3, 2):
return string.format(**context)
return string.format_map(context)
def is_decodable(value):
r"""
Return True if the supplied value is decodable (using the default
encoding).
>>> is_decodable(b'\xff')
False
>>> is_decodable(b'\x32')
True
"""
# TODO: This code could be expressed more consisely and directly
# with a jaraco.context.ExceptionTrap, but that adds an unfortunate
# long dependency tree, so for now, use boolean literals.
try:
value.decode()
except UnicodeDecodeError:
return False
return True
def is_binary(value):
"""
Return True if the value appears to be binary (that is, it's a byte
string and isn't decodable).
"""
return isinstance(value, bytes) and not is_decodable(value)
def trim(s):
r"""
Trim something like a docstring to remove the whitespace that
is common due to indentation and formatting.
>>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
'foo = bar\n\tbar = baz'
"""
return textwrap.dedent(s).strip()
class Splitter(object):
"""object that will split a string with the given arguments for each call
>>> s = Splitter(',')
>>> s('hello, world, this is your, master calling')
['hello', ' world', ' this is your', ' master calling']
"""
def __init__(self, *args):
self.args = args
def __call__(self, s):
return s.split(*self.args)
def indent(string, prefix=' ' * 4):
return prefix + string
class WordSet(tuple):
"""
Given a Python identifier, return the words that identifier represents,
whether in camel case, underscore-separated, etc.
>>> WordSet.parse("camelCase")
('camel', 'Case')
>>> WordSet.parse("under_sep")
('under', 'sep')
Acronyms should be retained
>>> WordSet.parse("firstSNL")
('first', 'SNL')
>>> WordSet.parse("you_and_I")
('you', 'and', 'I')
>>> WordSet.parse("A simple test")
('A', 'simple', 'test')
Multiple caps should not interfere with the first cap of another word.
>>> WordSet.parse("myABCClass")
('my', 'ABC', 'Class')
The result is a WordSet, so you can get the form you need.
>>> WordSet.parse("myABCClass").underscore_separated()
'my_ABC_Class'
>>> WordSet.parse('a-command').camel_case()
'ACommand'
>>> WordSet.parse('someIdentifier').lowered().space_separated()
'some identifier'
Slices of the result should return another WordSet.
>>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
'out_of_context'
>>> WordSet.from_class_name(WordSet()).lowered().space_separated()
'word set'
"""
_pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')
def capitalized(self):
return WordSet(word.capitalize() for word in self)
def lowered(self):
return WordSet(word.lower() for word in self)
def camel_case(self):
return ''.join(self.capitalized())
def headless_camel_case(self):
words = iter(self)
first = next(words).lower()
return itertools.chain((first,), WordSet(words).camel_case())
def underscore_separated(self):
return '_'.join(self)
def dash_separated(self):
return '-'.join(self)
def space_separated(self):
return ' '.join(self)
def __getitem__(self, item):
result = super(WordSet, self).__getitem__(item)
if isinstance(item, slice):
result = WordSet(result)
return result
# for compatibility with Python 2
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
@classmethod
def parse(cls, identifier):
matches = cls._pattern.finditer(identifier)
return WordSet(match.group(0) for match in matches)
@classmethod
def from_class_name(cls, subject):
return cls.parse(subject.__class__.__name__)
# for backward compatibility
words = WordSet.parse
def simple_html_strip(s):
r"""
Remove HTML from the string `s`.
>>> str(simple_html_strip(''))
''
>>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
A stormy day in paradise
>>> print(simple_html_strip('Somebody <!-- do not --> tell the truth.'))
Somebody tell the truth.
>>> print(simple_html_strip('What about<br/>\nmultiple lines?'))
What about
multiple lines?
"""
html_stripper = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
texts = (
match.group(3) or ''
for match
in html_stripper.finditer(s)
)
return ''.join(texts)
class SeparatedValues(six.text_type):
"""
A string separated by a separator. Overrides __iter__ for getting
the values.
>>> list(SeparatedValues('a,b,c'))
['a', 'b', 'c']
Whitespace is stripped and empty values are discarded.
>>> list(SeparatedValues(' a, b , c, '))
['a', 'b', 'c']
"""
separator = ','
def __iter__(self):
parts = self.split(self.separator)
return six.moves.filter(None, (part.strip() for part in parts))
class Stripper:
r"""
Given a series of lines, find the common prefix and strip it from them.
>>> lines = [
... 'abcdefg\n',
... 'abc\n',
... 'abcde\n',
... ]
>>> res = Stripper.strip_prefix(lines)
>>> res.prefix
'abc'
>>> list(res.lines)
['defg\n', '\n', 'de\n']
If no prefix is common, nothing should be stripped.
>>> lines = [
... 'abcd\n',
... '1234\n',
... ]
>>> res = Stripper.strip_prefix(lines)
>>> res.prefix = ''
>>> list(res.lines)
['abcd\n', '1234\n']
"""
def __init__(self, prefix, lines):
self.prefix = prefix
self.lines = map(self, lines)
@classmethod
def strip_prefix(cls, lines):
prefix_lines, lines = itertools.tee(lines)
prefix = functools.reduce(cls.common_prefix, prefix_lines)
return cls(prefix, lines)
def __call__(self, line):
if not self.prefix:
return line
null, prefix, rest = line.partition(self.prefix)
return rest
@staticmethod
def common_prefix(s1, s2):
"""
Return the common prefix of two lines.
"""
index = min(len(s1), len(s2))
while s1[:index] != s2[:index]:
index -= 1
return s1[:index]
def remove_prefix(text, prefix):
"""
Remove the prefix from the text if it exists.
>>> remove_prefix('underwhelming performance', 'underwhelming ')
'performance'
>>> remove_prefix('something special', 'sample')
'something special'
"""
null, prefix, rest = text.rpartition(prefix)
return rest
def remove_suffix(text, suffix):
"""
Remove the suffix from the text if it exists.
>>> remove_suffix('name.git', '.git')
'name'
>>> remove_suffix('something special', 'sample')
'something special'
"""
rest, suffix, null = text.partition(suffix)
return rest

View file

@ -0,0 +1,2 @@
Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.
Curabitur pretium tincidunt lacus. Nulla gravida orci a odio. Nullam varius, turpis et commodo pharetra, est eros bibendum elit, nec luctus magna felis sollicitudin mauris. Integer in mauris eu nibh euismod gravida. Duis ac tellus et risus vulputate vehicula. Donec lobortis risus a elit. Etiam tempor. Ut ullamcorper, ligula eu tempor congue, eros est euismod turpis, id tincidunt sapien risus a quam. Maecenas fermentum consequat mi. Donec fermentum. Pellentesque malesuada nulla a mi. Duis sapien sem, aliquet nec, commodo eget, consequat quis, neque. Aliquam faucibus, elit ut dictum aliquet, felis nisl adipiscing sapien, sed malesuada diam lacus eget erat. Cras mollis scelerisque nunc. Nullam arcu. Aliquam consequat. Curabitur augue lorem, dapibus quis, laoreet et, pretium ac, nisi. Aenean magna nisl, mollis quis, molestie eu, feugiat in, orci. In hac habitasse platea dictumst.

View file

@ -0,0 +1,622 @@
import re
import itertools
import textwrap
import functools
try:
from importlib.resources import files # type: ignore
except ImportError: # pragma: nocover
from importlib_resources import files # type: ignore
from jaraco.functools import compose, method_cache
from jaraco.context import ExceptionTrap
def substitution(old, new):
    """
    Build a callable that replaces every occurrence of *old* with *new*
    in the string it is given.
    """

    def replace(text):
        return text.replace(old, new)

    return replace
def multi_substitution(*substitutions):
    """
    Take a sequence of pairs specifying substitutions, and create
    a function that performs those substitutions, applying each pair
    in the order given.

    >>> multi_substitution(('foo', 'bar'), ('bar', 'baz'))('foo')
    'baz'
    """
    # Unpack the pairs eagerly so malformed input fails at creation time.
    pairs = [(old, new) for old, new in substitutions]

    def apply_all(text):
        for old, new in pairs:
            text = text.replace(old, new)
        return text

    return apply_all
class FoldedCase(str):
    """
    A case insensitive string class; behaves just like str
    except compares equal when the only variation is case.
    >>> s = FoldedCase('hello world')
    >>> s == 'Hello World'
    True
    >>> 'Hello World' == s
    True
    >>> s != 'Hello World'
    False
    >>> s.index('O')
    4
    >>> s.split('O')
    ['hell', ' w', 'rld']
    >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
    ['alpha', 'Beta', 'GAMMA']
    Sequence membership is straightforward.
    >>> "Hello World" in [s]
    True
    >>> s in ["Hello World"]
    True
    Allows testing for set inclusion, but candidate and elements
    must both be folded.
    >>> FoldedCase("Hello World") in {s}
    True
    >>> s in {FoldedCase("Hello World")}
    True
    String inclusion works as long as the FoldedCase object
    is on the right.
    >>> "hello" in FoldedCase("Hello World")
    True
    But not if the FoldedCase object is on the left:
    >>> FoldedCase('hello') in 'Hello World'
    False
    In that case, use ``in_``:
    >>> FoldedCase('hello').in_('Hello World')
    True
    >>> FoldedCase('hello') > FoldedCase('Hello')
    False
    >>> FoldedCase('ß') == FoldedCase('ss')
    True
    """

    # All ordering and equality comparisons delegate to str.casefold,
    # which handles aggressive caseless matching (e.g. 'ß' == 'ss').
    def __lt__(self, other):
        return self.casefold() < other.casefold()

    def __gt__(self, other):
        return self.casefold() > other.casefold()

    def __eq__(self, other):
        return self.casefold() == other.casefold()

    def __ne__(self, other):
        return self.casefold() != other.casefold()

    # Hash on the folded form so equal-but-differently-cased instances
    # collide in sets/dicts (required for the set-inclusion doctests).
    def __hash__(self):
        return hash(self.casefold())

    # Substring test; folds both sides, so works only with FoldedCase
    # on the right of ``in``.
    def __contains__(self, other):
        return super().casefold().__contains__(other.casefold())

    def in_(self, other):
        "Does self appear in other?"
        return self in FoldedCase(other)

    # cache casefold since it's likely to be called frequently.
    # (method_cache memoizes per instance; str is immutable so this is safe.)
    @method_cache
    def casefold(self):
        return super().casefold()

    # Case-insensitive index of a substring.
    def index(self, sub):
        return self.casefold().index(sub.casefold())

    # Case-insensitive split; note the default splitter is a space,
    # not None as in str.split.
    def split(self, splitter=' ', maxsplit=0):
        pattern = re.compile(re.escape(splitter), re.I)
        return pattern.split(self, maxsplit)
# Python 3.8 compatibility
_unicode_trap = ExceptionTrap(UnicodeDecodeError)


# ExceptionTrap.passes turns the wrapped function into a predicate: it
# returns True when the body completes without raising UnicodeDecodeError
# and False when that exception is raised (see the doctests below).
@_unicode_trap.passes
def is_decodable(value):
    r"""
    Return True if the supplied value is decodable (using the default
    encoding).
    >>> is_decodable(b'\xff')
    False
    >>> is_decodable(b'\x32')
    True
    """
    value.decode()
def is_binary(value):
    r"""
    Return True if the value appears to be binary (that is, it's a byte
    string and isn't decodable).

    >>> is_binary(b'\xff')
    True
    >>> is_binary('\xff')
    False
    """
    # Non-bytes values are never considered binary.
    if not isinstance(value, bytes):
        return False
    return not is_decodable(value)
def trim(s):
    r"""
    Trim something like a docstring: remove surrounding whitespace and
    the indentation common to all lines.

    >>> trim("\n\tfoo = bar\n\t\tbar = baz\n")
    'foo = bar\n\tbar = baz'
    """
    dedented = textwrap.dedent(s)
    return dedented.strip()
def wrap(s):
    """
    Wrap lines of text, retaining existing newlines as
    paragraph markers.

    Each input line is treated as its own paragraph, wrapped to the
    default ``textwrap`` width; the wrapped paragraphs are rejoined
    separated by blank lines.
    """
    wrapped_paragraphs = (
        '\n'.join(textwrap.wrap(paragraph)) for paragraph in s.splitlines()
    )
    return '\n\n'.join(wrapped_paragraphs)
def unwrap(s):
    r"""
    Given a multi-line string, return an unwrapped version.

    Paragraphs (separated by one or more blank lines) are each collapsed
    onto a single line; the resulting lines are joined by single newlines.
    """
    paragraphs = re.split(r'\n\n+', s)
    flattened = (paragraph.replace('\n', ' ') for paragraph in paragraphs)
    return '\n'.join(flattened)
# Load the bundled sample text ('Lorem ipsum.txt', shipped alongside this
# module) via importlib.resources; referenced by the wrap()/unwrap() doctests.
lorem_ipsum: str = files(__name__).joinpath('Lorem ipsum.txt').read_text()
class Splitter(object):
    """object that will split a string with the given arguments for each call

    >>> s = Splitter(',')
    >>> s('hello, world, this is your, master calling')
    ['hello', ' world', ' this is your', ' master calling']
    """

    def __init__(self, *args):
        # Arguments are forwarded verbatim to str.split on every call.
        self.args = args

    def __call__(self, s):
        return s.split(*self.args)
def indent(string, prefix=' ' * 4):
    """
    Return ``string`` with ``prefix`` (four spaces by default) prepended.

    >>> indent('foo')
    '    foo'
    """
    return prefix + string
class WordSet(tuple):
    """
    Given an identifier, return the words that identifier represents,
    whether in camel case, underscore-separated, etc.
    >>> WordSet.parse("camelCase")
    ('camel', 'Case')
    >>> WordSet.parse("under_sep")
    ('under', 'sep')
    Acronyms should be retained
    >>> WordSet.parse("firstSNL")
    ('first', 'SNL')
    >>> WordSet.parse("you_and_I")
    ('you', 'and', 'I')
    >>> WordSet.parse("A simple test")
    ('A', 'simple', 'test')
    Multiple caps should not interfere with the first cap of another word.
    >>> WordSet.parse("myABCClass")
    ('my', 'ABC', 'Class')
    The result is a WordSet, providing access to other forms.
    >>> WordSet.parse("myABCClass").underscore_separated()
    'my_ABC_Class'
    >>> WordSet.parse('a-command').camel_case()
    'ACommand'
    >>> WordSet.parse('someIdentifier').lowered().space_separated()
    'some identifier'
    Slices of the result should return another WordSet.
    >>> WordSet.parse('taken-out-of-context')[1:].underscore_separated()
    'out_of_context'
    >>> WordSet.from_class_name(WordSet()).lowered().space_separated()
    'word set'
    >>> example = WordSet.parse('figured it out')
    >>> example.headless_camel_case()
    'figuredItOut'
    >>> example.dash_separated()
    'figured-it-out'
    """

    # First alternative matches a (possibly capitalized) lowercase run;
    # second matches an acronym: capitals not followed by a lowercase
    # letter (so the last capital of "ABCClass" starts the next word).
    _pattern = re.compile('([A-Z]?[a-z]+)|([A-Z]+(?![a-z]))')

    def capitalized(self):
        return WordSet(word.capitalize() for word in self)

    def lowered(self):
        return WordSet(word.lower() for word in self)

    def camel_case(self):
        return ''.join(self.capitalized())

    def headless_camel_case(self):
        # Lowercase the first word, camel-case the remainder.
        words = iter(self)
        first = next(words).lower()
        new_words = itertools.chain((first,), WordSet(words).camel_case())
        return ''.join(new_words)

    def underscore_separated(self):
        return '_'.join(self)

    def dash_separated(self):
        return '-'.join(self)

    def space_separated(self):
        return ' '.join(self)

    def trim_right(self, item):
        """
        Remove the item from the end of the set.
        >>> WordSet.parse('foo bar').trim_right('foo')
        ('foo', 'bar')
        >>> WordSet.parse('foo bar').trim_right('bar')
        ('foo',)
        >>> WordSet.parse('').trim_right('bar')
        ()
        """
        return self[:-1] if self and self[-1] == item else self

    def trim_left(self, item):
        """
        Remove the item from the beginning of the set.
        >>> WordSet.parse('foo bar').trim_left('foo')
        ('bar',)
        >>> WordSet.parse('foo bar').trim_left('bar')
        ('foo', 'bar')
        >>> WordSet.parse('').trim_left('bar')
        ()
        """
        return self[1:] if self and self[0] == item else self

    def trim(self, item):
        """
        >>> WordSet.parse('foo bar').trim('foo')
        ('bar',)
        """
        return self.trim_left(item).trim_right(item)

    # Preserve the WordSet type when slicing (tuple slicing would
    # otherwise return a plain tuple).
    def __getitem__(self, item):
        result = super(WordSet, self).__getitem__(item)
        if isinstance(item, slice):
            result = WordSet(result)
        return result

    @classmethod
    def parse(cls, identifier):
        matches = cls._pattern.finditer(identifier)
        return WordSet(match.group(0) for match in matches)

    @classmethod
    def from_class_name(cls, subject):
        return cls.parse(subject.__class__.__name__)
# for backward compatibility
words = WordSet.parse
def simple_html_strip(s):
    r"""
    Remove HTML tags and HTML comments from the string `s`, keeping the
    text between markup (including newlines) verbatim.

    >>> str(simple_html_strip(''))
    ''
    >>> print(simple_html_strip('A <bold>stormy</bold> day in paradise'))
    A stormy day in paradise
    """
    # Group 1: comments, group 2: tags, group 3: plain text. Only plain
    # text contributes to the result; DOTALL lets markup span lines.
    markup = re.compile('(<!--.*?-->)|(<[^>]*>)|([^<]+)', re.DOTALL)
    pieces = []
    for match in markup.finditer(s):
        pieces.append(match.group(3) or '')
    return ''.join(pieces)
class SeparatedValues(str):
    """
    A string separated by a separator. Overrides __iter__ for getting
    the values.

    >>> list(SeparatedValues('a,b,c'))
    ['a', 'b', 'c']

    Whitespace is stripped and empty values are discarded.

    >>> list(SeparatedValues(' a, b , c, '))
    ['a', 'b', 'c']
    """

    separator = ','

    def __iter__(self):
        # Strip each piece, then drop any that end up empty.
        stripped = (piece.strip() for piece in self.split(self.separator))
        return (piece for piece in stripped if piece)
class Stripper:
    r"""
    Given a series of lines, find the common prefix and strip it from them.

    >>> lines = [
    ...     'abcdefg\n',
    ...     'abc\n',
    ...     'abcde\n',
    ... ]
    >>> res = Stripper.strip_prefix(lines)
    >>> res.prefix
    'abc'
    >>> list(res.lines)
    ['defg\n', '\n', 'de\n']

    If no prefix is common, nothing should be stripped.

    >>> lines = [
    ...     'abcd\n',
    ...     '1234\n',
    ... ]
    >>> res = Stripper.strip_prefix(lines)
    >>> res.prefix
    ''
    >>> list(res.lines)
    ['abcd\n', '1234\n']
    """

    def __init__(self, prefix, lines):
        self.prefix = prefix
        # Strip the prefix from each line lazily, as the caller consumes.
        self.lines = map(self, lines)

    @classmethod
    def strip_prefix(cls, lines):
        """
        Compute the longest common prefix of ``lines`` and return a
        Stripper whose ``lines`` iterator yields each line with that
        prefix removed.
        """
        prefix_lines, lines = itertools.tee(lines)
        prefix = functools.reduce(cls.common_prefix, prefix_lines)
        return cls(prefix, lines)

    def __call__(self, line):
        """Return ``line`` with ``self.prefix`` removed (up to its
        first occurrence)."""
        if not self.prefix:
            return line
        null, prefix, rest = line.partition(self.prefix)
        return rest

    @staticmethod
    def common_prefix(s1, s2):
        """
        Return the common prefix of two lines.
        """
        index = min(len(s1), len(s2))
        while s1[:index] != s2[:index]:
            index -= 1
        return s1[:index]
def remove_prefix(text, prefix):
    """
    Remove ``prefix`` from the start of ``text`` if present there.

    >>> remove_prefix('underwhelming performance', 'underwhelming ')
    'performance'
    >>> remove_prefix('something special', 'sample')
    'something special'

    Only a true prefix is removed; occurrences elsewhere are untouched.

    >>> remove_prefix('abcabc-x', 'abc')
    'abc-x'
    """
    # The previous rpartition-based implementation discarded everything
    # up to the *last* occurrence of prefix anywhere in the string,
    # contradicting the documented contract; test the prefix explicitly.
    if prefix and text.startswith(prefix):
        return text[len(prefix):]
    return text
def remove_suffix(text, suffix):
    """
    Remove ``suffix`` from the end of ``text`` if present there.

    >>> remove_suffix('name.git', '.git')
    'name'
    >>> remove_suffix('something special', 'sample')
    'something special'

    Only a true suffix is removed; occurrences elsewhere are untouched.

    >>> remove_suffix('name.git.git', '.git')
    'name.git'
    """
    # The previous partition-based implementation truncated at the
    # *first* occurrence of suffix anywhere in the string, contradicting
    # the documented contract; test the suffix explicitly.
    if suffix and text.endswith(suffix):
        return text[: -len(suffix)]
    return text
def normalize_newlines(text):
    r"""
    Replace alternate newlines with the canonical newline.

    >>> normalize_newlines('Lorem Ipsum\u2029')
    'Lorem Ipsum\n'
    >>> normalize_newlines('Lorem Ipsum\r\n')
    'Lorem Ipsum\n'
    >>> normalize_newlines('Lorem Ipsum\x85')
    'Lorem Ipsum\n'
    """
    # \r\n must precede \r and \n in the alternation so the pair
    # collapses to a single canonical newline.
    alternates = ('\r\n', '\r', '\n', '\u0085', '\u2028', '\u2029')
    return re.sub('|'.join(alternates), '\n', text)
def _nonblank(line):
    # Keep lines that are non-empty and are not comments.
    return line and not line.startswith('#')


@functools.singledispatch
def yield_lines(iterable):
    r"""
    Yield valid lines of a string or iterable.

    >>> list(yield_lines(''))
    []
    >>> list(yield_lines(['foo', 'bar']))
    ['foo', 'bar']
    >>> list(yield_lines('foo\nbar'))
    ['foo', 'bar']
    >>> list(yield_lines('\nfoo\n#bar\nbaz #comment'))
    ['foo', 'baz #comment']
    >>> list(yield_lines(['foo\nbar', 'baz', 'bing\n\n\n']))
    ['foo', 'bar', 'baz', 'bing']
    """
    # Generic case: flatten, dispatching each element (strings recurse
    # into the str-specific implementation below).
    return itertools.chain.from_iterable(map(yield_lines, iterable))


@yield_lines.register(str)
def _(text):
    # Strip each line of the string; drop blanks and comment lines.
    stripped = map(str.strip, text.splitlines())
    return filter(_nonblank, stripped)
def drop_comment(line):
    """
    Drop comments.

    >>> drop_comment('foo # bar')
    'foo'

    A hash without a space may be in a URL.

    >>> drop_comment('http://example.com/foo#bar')
    'http://example.com/foo#bar'
    """
    head, _sep, _comment = line.partition(' #')
    return head
def join_continuation(lines):
    r"""
    Join lines continued by a trailing backslash.

    >>> list(join_continuation(['foo \\', 'bar', 'baz']))
    ['foobar', 'baz']
    >>> list(join_continuation(['foo \\', 'bar \\', 'baz']))
    ['foobarbaz']

    Quirk (preserved deliberately): the character preceding the
    backslash is also elided.

    >>> list(join_continuation(['goo\\', 'dly']))
    ['godly']

    If no line is available to continue, the pending lines are suppressed.

    >>> list(join_continuation(['foo', 'bar\\', 'baz\\']))
    ['foo']
    """
    source = iter(lines)
    for current in source:
        # Keep merging while the (possibly already merged) line ends in
        # a continuation marker.
        while current.endswith('\\'):
            try:
                follower = next(source)
            except StopIteration:
                # Dangling continuation at end of input: drop it.
                return
            current = current[:-2].strip() + follower
        yield current
def read_newlines(filename, limit=1024):
    r"""
    Detect the newline convention(s) used in ``filename``.

    Reads up to ``limit`` characters in universal-newlines text mode and
    returns the file object's ``newlines`` attribute: ``None`` if no
    newline was encountered, a single string for one convention, or a
    tuple when several conventions appear.
    """
    with open(filename) as fp:
        fp.read(limit)
        return fp.newlines

View file

@ -0,0 +1,25 @@
# The two keyboard layouts as strings whose characters occupy
# corresponding key positions, so each index maps one key across layouts.
qwerty = "-=qwertyuiop[]asdfghjkl;'zxcvbnm,./_+QWERTYUIOP{}ASDFGHJKL:\"ZXCVBNM<>?"
dvorak = "[]',.pyfgcrl/=aoeuidhtns-;qjkxbmwvz{}\"<>PYFGCRL?+AOEUIDHTNS_:QJKXBMWVZ"

# Character-translation tables (for str.translate) in each direction.
to_dvorak = str.maketrans(qwerty, dvorak)
to_qwerty = str.maketrans(dvorak, qwerty)
def translate(input, translation):
    """
    Apply a character-translation table (see ``str.maketrans``) to
    ``input`` and return the result.

    >>> translate('dvorak', to_dvorak)
    'ekrpat'
    >>> translate('qwerty', to_qwerty)
    'x,dokt'
    """
    return input.translate(translation)
def _translate_stream(stream, translation):
    """
    Read the entire contents of ``stream``, translate it through
    ``translation``, and print the result.

    >>> import io
    >>> _translate_stream(io.StringIO('foo'), to_dvorak)
    urr
    """
    contents = stream.read()
    print(translate(contents, translation))

View file

@ -0,0 +1,33 @@
import autocommand
import inflect
from more_itertools import always_iterable
import jaraco.text
def report_newlines(filename):
    r"""
    Report the newlines in the indicated file.
    >>> tmp_path = getfixture('tmp_path')
    >>> filename = tmp_path / 'out.txt'
    >>> _ = filename.write_text('foo\nbar\n', newline='')
    >>> report_newlines(filename)
    newline is '\n'
    >>> filename = tmp_path / 'out.txt'
    >>> _ = filename.write_text('foo\nbar\r\n', newline='')
    >>> report_newlines(filename)
    newlines are ('\n', '\r\n')
    """
    # read_newlines returns None, a single string, or a tuple of
    # conventions; always_iterable normalizes that to something countable.
    newlines = jaraco.text.read_newlines(filename)
    count = len(tuple(always_iterable(newlines)))
    # Pluralize "newline is" / "newlines are" to agree with the count.
    engine = inflect.engine()
    print(
        engine.plural_noun("newline", count),
        engine.plural_verb("is", count),
        repr(newlines),
    )


# Expose this module as a command-line tool via autocommand.
autocommand.autocommand(__name__)(report_newlines)

View file

@ -0,0 +1,21 @@
import sys
import autocommand
from jaraco.text import Stripper
def strip_prefix():
    r"""
    Strip any common prefix from stdin.
    >>> import io, pytest
    >>> getfixture('monkeypatch').setattr('sys.stdin', io.StringIO('abcdef\nabc123'))
    >>> strip_prefix()
    def
    123
    """
    # Stripper.strip_prefix finds the longest prefix common to all lines
    # of stdin and yields each line with that prefix removed.
    sys.stdout.writelines(Stripper.strip_prefix(sys.stdin).lines)


# Expose this module as a command-line tool via autocommand.
autocommand.autocommand(__name__)(strip_prefix)

View file

@ -0,0 +1,6 @@
import sys

from . import layouts

# One-line "main" guard: the short-circuiting ``and`` runs the right-hand
# side only when this module is executed directly (e.g. ``python -m``),
# translating stdin from qwerty to dvorak and printing the result.
__name__ == '__main__' and layouts._translate_stream(sys.stdin, layouts.to_dvorak)

View file

@ -0,0 +1,6 @@
import sys
from . import layouts
__name__ == '__main__' and layouts._translate_stream(sys.stdin, layouts.to_qwerty)

View file

@ -1,12 +1,10 @@
import argparse import argparse
import six
from jaraco.classes import meta from jaraco.classes import meta
from jaraco import text from jaraco import text # type: ignore
@six.add_metaclass(meta.LeafClassesMeta) class Command(metaclass=meta.LeafClassesMeta):
class Command(object):
""" """
A general-purpose base class for creating commands for a command-line A general-purpose base class for creating commands for a command-line
program using argparse. Each subclass of Command represents a separate program using argparse. Each subclass of Command represents a separate
@ -73,5 +71,6 @@ class Extend(argparse.Action):
>>> args.foo >>> args.foo
['a=1', 'b=2', 'c=3'] ['a=1', 'b=2', 'c=3']
""" """
def __call__(self, parser, namespace, values, option_string=None): def __call__(self, parser, namespace, values, option_string=None):
getattr(namespace, self.dest).extend(values) getattr(namespace, self.dest).extend(values)

View file

@ -1,5 +1,3 @@
from __future__ import unicode_literals, absolute_import
import tempfile import tempfile
import os import os
import sys import sys
@ -9,9 +7,12 @@ import collections
import io import io
import difflib import difflib
import six from typing import Mapping
class EditProcessException(RuntimeError):
pass
class EditProcessException(RuntimeError): pass
class EditableFile(object): class EditableFile(object):
""" """
@ -32,15 +33,16 @@ class EditableFile(object):
EDITOR is defined, defaults to 'notepad' on Windows and 'edit' on EDITOR is defined, defaults to 'notepad' on Windows and 'edit' on
other platforms. other platforms.
""" """
platform_default_editors = collections.defaultdict(
platform_default_editors: Mapping[str, str] = collections.defaultdict(
lambda: 'edit', lambda: 'edit',
win32 = 'notepad', win32='notepad',
linux2 = 'vi', linux2='vi',
) )
encoding = 'utf-8' encoding = 'utf-8'
def __init__(self, data='', content_type='text/plain'): def __init__(self, data='', content_type='text/plain'):
self.data = six.text_type(data) self.data = str(data)
self.content_type = content_type self.content_type = content_type
def __enter__(self): def __enter__(self):
@ -103,6 +105,7 @@ class EditableFile(object):
def _save_diff(*versions): def _save_diff(*versions):
def get_lines(content): def get_lines(content):
return list(io.StringIO(content)) return list(io.StringIO(content))
lines = map(get_lines, versions) lines = map(get_lines, versions)
diff = difflib.context_diff(*lines) diff = difflib.context_diff(*lines)
return tuple(diff) return tuple(diff)

View file

@ -4,7 +4,9 @@ This module currently provides a cross-platform getch function
try: try:
# Windows # Windows
from msvcrt import getch from msvcrt import getch # type: ignore
getch # workaround for https://github.com/kevinw/pyflakes/issues/13
except ImportError: except ImportError:
pass pass
@ -14,7 +16,7 @@ try:
import tty import tty
import termios import termios
def getch(): def getch(): # type: ignore
fd = sys.stdin.fileno() fd = sys.stdin.fileno()
old = termios.tcgetattr(fd) old = termios.tcgetattr(fd)
try: try:
@ -22,5 +24,7 @@ try:
return sys.stdin.read(1) return sys.stdin.read(1)
finally: finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old) termios.tcsetattr(fd, termios.TCSADRAIN, old)
except ImportError: except ImportError:
pass pass

View file

@ -1,13 +1,11 @@
from __future__ import print_function, absolute_import, unicode_literals
import itertools import itertools
import six
class Menu(object): class Menu(object):
""" """
A simple command-line based menu A simple command-line based menu
""" """
def __init__(self, choices=None, formatter=str): def __init__(self, choices=None, formatter=str):
self.choices = choices or list() self.choices = choices or list()
self.formatter = formatter self.formatter = formatter
@ -21,7 +19,7 @@ class Menu(object):
print(menu_fmt.format(**locals())) print(menu_fmt.format(**locals()))
print() print()
try: try:
answer = int(six.moves.input(prompt)) answer = int(input(prompt))
result = self.choices[answer - 1] result = self.choices[answer - 1]
except ValueError: except ValueError:
print('invalid selection') print('invalid selection')

View file

@ -1,19 +1,13 @@
# deprecated -- use TQDM # deprecated -- use TQDM
from __future__ import (print_function, absolute_import, unicode_literals,
division)
import time import time
import sys import sys
import itertools import itertools
import abc import abc
import datetime import datetime
import six
class AbstractProgressBar(metaclass=abc.ABCMeta):
@six.add_metaclass(abc.ABCMeta)
class AbstractProgressBar(object):
def __init__(self, unit='', size=70): def __init__(self, unit='', size=70):
""" """
Size is the nominal size in characters Size is the nominal size in characters
@ -81,7 +75,7 @@ class SimpleProgressBar(AbstractProgressBar):
def demo(cls): def demo(cls):
bar3 = cls(unit='cubes', size=30) bar3 = cls(unit='cubes', size=30)
with bar3: with bar3:
for x in six.moves.range(1, 759): for x in range(1, 759):
bar3.report(x) bar3.report(x)
time.sleep(0.01) time.sleep(0.01)
@ -102,12 +96,7 @@ class TargetProgressBar(AbstractProgressBar):
template += percent_str template += percent_str
summary = self.summary('{amt}/{total}') summary = self.summary('{amt}/{total}')
template += summary template += summary
empty = template.format( empty = template.format(total=self.total, bar='', bar_len=0, **locals())
total=self.total,
bar='',
bar_len=0,
**locals()
)
bar_len = self.size - len(empty) bar_len = self.size - len(empty)
bar = '=' * int(completed * bar_len) bar = '=' * int(completed * bar_len)
return template.format(total=self.total, **locals()) return template.format(total=self.total, **locals())
@ -116,13 +105,13 @@ class TargetProgressBar(AbstractProgressBar):
def demo(cls): def demo(cls):
bar1 = cls(100, 'blocks') bar1 = cls(100, 'blocks')
with bar1: with bar1:
for x in six.moves.range(1, 101): for x in range(1, 101):
bar1.report(x) bar1.report(x)
time.sleep(0.05) time.sleep(0.05)
bar2 = cls(758, size=50) bar2 = cls(758, size=50)
with bar2: with bar2:
for x in six.moves.range(1, 759): for x in range(1, 759):
bar2.report(x) bar2.report(x)
time.sleep(0.01) time.sleep(0.01)
@ -144,9 +133,9 @@ def countdown(template, duration=datetime.timedelta(seconds=5)):
remaining = deadline - datetime.datetime.now() remaining = deadline - datetime.datetime.now()
remaining = max(datetime.timedelta(), remaining) remaining = max(datetime.timedelta(), remaining)
msg = template.format(remaining.total_seconds()) msg = template.format(remaining.total_seconds())
print(msg, end=' '*10) print(msg, end=' ' * 10)
sys.stdout.flush() sys.stdout.flush()
time.sleep(.1) time.sleep(0.1)
print('\b'*80, end='') print('\b' * 80, end='')
sys.stdout.flush() sys.stdout.flush()
print() print()

Some files were not shown because too many files have changed in this diff Show more