From 76cc56a215e7cb9c2ff0b551e0d69c064e965bf9 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Sat, 12 Nov 2022 17:53:03 -0800
Subject: [PATCH] Bump cherrypy from 18.6.1 to 18.8.0 (#1796)

* Bump cherrypy from 18.6.1 to 18.8.0

Bumps [cherrypy](https://github.com/cherrypy/cherrypy) from 18.6.1 to 18.8.0.
- [Release notes](https://github.com/cherrypy/cherrypy/releases)
- [Changelog](https://github.com/cherrypy/cherrypy/blob/main/CHANGES.rst)
- [Commits](https://github.com/cherrypy/cherrypy/compare/v18.6.1...v18.8.0)

---
updated-dependencies:
- dependency-name: cherrypy
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update cherrypy==18.8.0

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: JonnyWong16 <9099342+JonnyWong16@users.noreply.github.com>
---
 lib/autocommand/__init__.py | 27 +
 lib/autocommand/autoasync.py | 140 +
 lib/autocommand/autocommand.py | 70 +
 lib/autocommand/automain.py | 59 +
 lib/autocommand/autoparse.py | 333 ++
 lib/autocommand/errors.py | 23 +
 lib/cherrypy/_cpdispatch.py | 8 +-
 lib/cherrypy/_cperror.py | 5 +-
 lib/cherrypy/_cpmodpy.py | 5 +-
 lib/cherrypy/_cprequest.py | 7 +-
 lib/cherrypy/lib/auth_digest.py | 13 +-
 lib/cherrypy/lib/covercp.py | 5 +-
 lib/cherrypy/lib/httputil.py | 30 +
 lib/cherrypy/lib/reprconf.py | 5 +-
 lib/cherrypy/lib/sessions.py | 10 +-
 lib/cherrypy/process/plugins.py | 3 +-
 lib/cherrypy/test/helper.py | 3 +-
 lib/cherrypy/test/logtest.py | 33 +-
 lib/cherrypy/test/modfastcgi.py | 7 +-
 lib/cherrypy/test/modfcgid.py | 7 +-
 lib/cherrypy/test/modpy.py | 7 +-
 lib/cherrypy/test/modwsgi.py | 7 +-
 lib/cherrypy/test/test_auth_basic.py | 2 +-
 lib/cherrypy/test/test_auth_digest.py | 2 +-
 lib/cherrypy/test/test_core.py | 5 +-
 lib/cherrypy/test/test_encoding.py | 2 +-
 lib/cherrypy/test/test_logging.py | 4 +-
 lib/cherrypy/test/test_request_obj.py | 16 +-
 lib/cherrypy/test/test_states.py | 11 +-
 lib/cherrypy/test/test_tutorials.py | 4 +-
 lib/cherrypy/tutorial/pdf_file.pdf | Bin 85698 -> 11961 bytes
 lib/cherrypy/tutorial/tut04_complex_site.py | 4 +-
 lib/inflect/__init__.py | 3991 +++++++++++++++++++
 lib/inflect/py.typed | 0
 lib/jaraco/classes/properties.py | 11 +-
 lib/jaraco/collections.py | 100 +-
 lib/jaraco/text/__init__.py | 46 +-
 lib/jaraco/text/layouts.py | 25 +
 lib/jaraco/text/show-newlines.py | 33 +
 lib/jaraco/text/to-dvorak.py | 6 +
 lib/jaraco/text/to-qwerty.py | 6 +
 lib/more_itertools/__init__.py | 4 +-
 lib/more_itertools/more.py | 226 +-
 lib/more_itertools/more.pyi | 36 +-
 lib/more_itertools/recipes.py | 175 +-
 lib/more_itertools/recipes.pyi | 26 +-
 lib/pydantic/__init__.py | 131 +
 lib/pydantic/_hypothesis_plugin.py | 386 ++
 lib/pydantic/annotated_types.py | 72 +
 lib/pydantic/class_validators.py | 342 ++
 lib/pydantic/color.py | 494 +++
 lib/pydantic/config.py | 192 +
 lib/pydantic/dataclasses.py | 479 +++
 lib/pydantic/datetime_parse.py | 248 ++
 lib/pydantic/decorator.py | 264 ++
 lib/pydantic/env_settings.py | 346 ++
 lib/pydantic/error_wrappers.py | 162 +
 lib/pydantic/errors.py | 646 +++
 lib/pydantic/fields.py | 1247 ++++++
 lib/pydantic/generics.py | 364 ++
 lib/pydantic/json.py | 112 +
 lib/pydantic/main.py | 1109 ++++++
 lib/pydantic/mypy.py | 850 ++++
 lib/pydantic/networks.py | 736 ++++
 lib/pydantic/parse.py | 66 +
 lib/pydantic/py.typed | 0
 lib/pydantic/schema.py | 1153 ++++++
 lib/pydantic/tools.py | 92 +
 lib/pydantic/types.py | 1187 ++++++
 lib/pydantic/typing.py | 602 +++
 lib/pydantic/utils.py | 841 ++++
 lib/pydantic/validators.py | 765 ++++
 lib/pydantic/version.py | 38 +
 lib/typing_extensions.py | 2021 +++++-----
 requirements.txt | 2 +-
 75 files changed, 19150 insertions(+), 1339 deletions(-)
 create mode 100644 lib/autocommand/__init__.py
 create mode 100644 lib/autocommand/autoasync.py
 create mode 100644 lib/autocommand/autocommand.py
 create mode 100644 lib/autocommand/automain.py
 create mode 100644 lib/autocommand/autoparse.py
 create mode 100644 lib/autocommand/errors.py
 create mode 100644 lib/inflect/__init__.py
 create mode 100644 lib/inflect/py.typed
 create mode 100644 lib/jaraco/text/layouts.py
 create mode 100644 lib/jaraco/text/show-newlines.py
 create mode 100644 lib/jaraco/text/to-dvorak.py
 create mode 100644 lib/jaraco/text/to-qwerty.py
 create mode 100644 lib/pydantic/__init__.py
 create mode 100644 lib/pydantic/_hypothesis_plugin.py
 create mode 100644 lib/pydantic/annotated_types.py
 create mode 100644 lib/pydantic/class_validators.py
 create mode 100644 lib/pydantic/color.py
 create mode 100644 lib/pydantic/config.py
 create mode 100644 lib/pydantic/dataclasses.py
 create mode 100644 lib/pydantic/datetime_parse.py
 create mode 100644 lib/pydantic/decorator.py
 create mode 100644 lib/pydantic/env_settings.py
 create mode 100644 lib/pydantic/error_wrappers.py
 create mode 100644 lib/pydantic/errors.py
 create mode 100644 lib/pydantic/fields.py
 create mode 100644 lib/pydantic/generics.py
 create mode 100644 lib/pydantic/json.py
 create mode 100644 lib/pydantic/main.py
 create mode 100644 lib/pydantic/mypy.py
 create mode 100644 lib/pydantic/networks.py
 create mode 100644 lib/pydantic/parse.py
 create mode 100644 lib/pydantic/py.typed
 create mode 100644 lib/pydantic/schema.py
 create mode 100644 lib/pydantic/tools.py
 create mode 100644 lib/pydantic/types.py
 create mode 100644 lib/pydantic/typing.py
 create mode 100644 lib/pydantic/utils.py
 create mode 100644 lib/pydantic/validators.py
 create mode 100644 lib/pydantic/version.py

diff --git a/lib/autocommand/__init__.py b/lib/autocommand/__init__.py
new file mode 100644
index 00000000..73fbfca6
--- /dev/null
+++ b/lib/autocommand/__init__.py
@@ -0,0 +1,27 @@
+# Copyright 2014-2016 Nathan West
+#
+# This file is part of autocommand.
+#
+# autocommand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# autocommand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with autocommand. If not, see <http://www.gnu.org/licenses/>.
+
+# flake8 flags all these imports as unused, hence the NOQAs everywhere.
+
+from .automain import automain  # NOQA
+from .autoparse import autoparse, smart_open  # NOQA
+from .autocommand import autocommand  # NOQA
+
+try:
+    from .autoasync import autoasync  # NOQA
+except ImportError:  # pragma: no cover
+    pass
diff --git a/lib/autocommand/autoasync.py b/lib/autocommand/autoasync.py
new file mode 100644
index 00000000..3c8ebdcf
--- /dev/null
+++ b/lib/autocommand/autoasync.py
@@ -0,0 +1,140 @@
+# Copyright 2014-2015 Nathan West
+#
+# This file is part of autocommand.
+#
+# autocommand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# autocommand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with autocommand. If not, see <http://www.gnu.org/licenses/>.
+
+from asyncio import get_event_loop, iscoroutine
+from functools import wraps
+from inspect import signature
+
+
+def _launch_forever_coro(coro, args, kwargs, loop):
+    '''
+    This helper function launches an async main function that was tagged with
+    forever=True. There are three possibilities:
+
+    - The function is a normal function, which handles initializing the event
+      loop, which is then run forever
+    - The function is a coroutine, which needs to be scheduled in the event
+      loop, which is then run forever
+    - There is also the possibility that the function is a normal function
+      wrapping a coroutine function
+
+    The function is therefore called unconditionally and scheduled in the event
+    loop if the return value is a coroutine object.
+
+    The reason this is a separate function is to make absolutely sure that all
+    the objects created are garbage collected after all is said and done; we
+    do this to ensure that any exceptions raised in the tasks are collected
+    ASAP.
+    '''
+
+    # Personal note: I consider this an antipattern, as it relies on the use of
+    # unowned resources. The setup function dumps some stuff into the event
+    # loop where it just whirls in the ether without a well defined owner or
+    # lifetime. For this reason, there's a good chance I'll remove the
+    # forever=True feature from autoasync at some point in the future.
+    thing = coro(*args, **kwargs)
+    if iscoroutine(thing):
+        loop.create_task(thing)
+
+
+def autoasync(coro=None, *, loop=None, forever=False, pass_loop=False):
+    '''
+    Convert an asyncio coroutine into a function which, when called, is
+    evaluated in an event loop, and the return value returned. This is
+    intended to make it easy to write entry points into asyncio coroutines,
+    which otherwise need to be explicitly evaluated with an event loop's
+    run_until_complete.
+
+    If `loop` is given, it is used as the event loop to run the coro in. If it
+    is None (the default), the loop is retrieved using asyncio.get_event_loop.
+    This call is deferred until the decorated function is called, so that
+    callers can install custom event loops or event loop policies after
+    @autoasync is applied.
+
+    If `forever` is True, the loop is run forever after the decorated coroutine
+    is finished. Use this for servers created with asyncio.start_server and the
+    like.
+
+    If `pass_loop` is True, the event loop object is passed into the coroutine
+    as the `loop` kwarg when the wrapper function is called. In this case, the
+    wrapper function's __signature__ is updated to remove this parameter, so
+    that autoparse can still be used on it without generating a parameter for
+    `loop`.
+
+    This decorator can be applied with ( @autoasync(...) ) or without
+    ( @autoasync ) arguments.
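+
+    The examples below use the pre-3.5, generator-based coroutine style. On
+    Python 3.5+ the same entry point would normally be a native coroutine;
+    a minimal sketch (added here for illustration, not part of the upstream
+    docstring; assumes asyncio and sys are imported):
+
+        @autoasync
+        async def get_file(host, port):
+            reader, writer = await asyncio.open_connection(host, port)
+            data = await reader.read()
+            sys.stdout.write(data.decode())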
+
+    Examples:
+
+    @autoasync
+    def get_file(host, port):
+        reader, writer = yield from asyncio.open_connection(host, port)
+        data = yield from reader.read()
+        sys.stdout.write(data.decode())
+
+    get_file(host, port)
+
+    @autoasync(forever=True, pass_loop=True)
+    def server(host, port, loop):
+        yield from loop.create_server(Proto, host, port)
+
+    server('localhost', 8899)
+
+    '''
+    if coro is None:
+        return lambda c: autoasync(
+            c, loop=loop,
+            forever=forever,
+            pass_loop=pass_loop)
+
+    # The old and new signatures are required to correctly bind the loop
+    # parameter in 100% of cases, even if it's a positional parameter.
+    # NOTE: A future release will probably require the loop parameter to be
+    # a kwonly parameter.
+    if pass_loop:
+        old_sig = signature(coro)
+        new_sig = old_sig.replace(parameters=(
+            param for name, param in old_sig.parameters.items()
+            if name != "loop"))
+
+    @wraps(coro)
+    def autoasync_wrapper(*args, **kwargs):
+        # Defer the call to get_event_loop so that, if a custom policy is
+        # installed after the autoasync decorator, it is respected at call time
+        local_loop = get_event_loop() if loop is None else loop
+
+        # Inject the 'loop' argument. We have to use this signature binding to
+        # ensure it's injected in the correct place (positional, keyword, etc)
+        if pass_loop:
+            bound_args = old_sig.bind_partial()
+            bound_args.arguments.update(
+                loop=local_loop,
+                **new_sig.bind(*args, **kwargs).arguments)
+            args, kwargs = bound_args.args, bound_args.kwargs
+
+        if forever:
+            _launch_forever_coro(coro, args, kwargs, local_loop)
+            local_loop.run_forever()
+        else:
+            return local_loop.run_until_complete(coro(*args, **kwargs))
+
+    # Attach the updated signature. This allows 'pass_loop' to be used with
+    # autoparse
+    if pass_loop:
+        autoasync_wrapper.__signature__ = new_sig
+
+    return autoasync_wrapper
diff --git a/lib/autocommand/autocommand.py b/lib/autocommand/autocommand.py
new file mode 100644
index 00000000..097e86de
--- /dev/null
+++ b/lib/autocommand/autocommand.py
@@ -0,0 +1,70 @@
+# Copyright 2014-2015 Nathan West
+#
+# This file is part of autocommand.
+#
+# autocommand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# autocommand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with autocommand. If not, see <http://www.gnu.org/licenses/>.
+
+from .autoparse import autoparse
+from .automain import automain
+try:
+    from .autoasync import autoasync
+except ImportError:  # pragma: no cover
+    pass
+
+
+def autocommand(
+        module, *,
+        description=None,
+        epilog=None,
+        add_nos=False,
+        parser=None,
+        loop=None,
+        forever=False,
+        pass_loop=False):
+
+    if callable(module):
+        raise TypeError('autocommand requires a module name argument')
+
+    def autocommand_decorator(func):
+        # Step 1: if requested, run it all in an asyncio event loop. autoasync
+        # patches the __signature__ of the decorated function, so that in the
+        # event that pass_loop is True, the `loop` parameter of the original
+        # function will *not* be interpreted as a command-line argument by
+        # autoparse
+        if loop is not None or forever or pass_loop:
+            func = autoasync(
+                func,
+                loop=None if loop is True else loop,
+                pass_loop=pass_loop,
+                forever=forever)
+
+        # Step 2: create parser. We do this second so that the arguments are
+        # parsed and passed *before* entering the asyncio event loop, if it
+        # exists. This simplifies the stack trace and ensures errors are
+        # reported earlier. It also ensures that errors raised during parsing &
+        # passing are still raised if `forever` is True.
+        func = autoparse(
+            func,
+            description=description,
+            epilog=epilog,
+            add_nos=add_nos,
+            parser=parser)
+
+        # Step 3: call the function automatically if __name__ == '__main__' (or
+        # if True was provided)
+        func = automain(module)(func)
+
+        return func
+
+    return autocommand_decorator
diff --git a/lib/autocommand/automain.py b/lib/autocommand/automain.py
new file mode 100644
index 00000000..6cc45db6
--- /dev/null
+++ b/lib/autocommand/automain.py
@@ -0,0 +1,59 @@
+# Copyright 2014-2015 Nathan West
+#
+# This file is part of autocommand.
+#
+# autocommand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# autocommand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with autocommand. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+from .errors import AutocommandError
+
+
+class AutomainRequiresModuleError(AutocommandError, TypeError):
+    pass
+
+
+def automain(module, *, args=(), kwargs=None):
+    '''
+    This decorator automatically invokes a function if the module is being run
+    as the "__main__" module. Optionally, provide args or kwargs with which to
+    call the function. If `module` is "__main__", the function is called, and
+    the program is `sys.exit`ed with the return value. You can also pass `True`
+    to cause the function to be called unconditionally. If the function is not
+    called, it is returned unchanged by the decorator.
+
+    Usage:
+
+    @automain(__name__)  # Pass __name__ to check __name__=="__main__"
+    def main():
+        ...
+
+    If __name__ is "__main__" here, the main function is called, and then
+    sys.exit is called with the return value.
+    '''
+
+    # Check that @automain(...) was called, rather than @automain
+    if callable(module):
+        raise AutomainRequiresModuleError(module)
+
+    if module == '__main__' or module is True:
+        if kwargs is None:
+            kwargs = {}
+
+        # Use a function definition instead of a lambda for a neater traceback
+        def automain_decorator(main):
+            sys.exit(main(*args, **kwargs))
+
+        return automain_decorator
+    else:
+        return lambda main: main
diff --git a/lib/autocommand/autoparse.py b/lib/autocommand/autoparse.py
new file mode 100644
index 00000000..0276a3fa
--- /dev/null
+++ b/lib/autocommand/autoparse.py
@@ -0,0 +1,333 @@
+# Copyright 2014-2015 Nathan West
+#
+# This file is part of autocommand.
+#
+# autocommand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# autocommand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with autocommand. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+from re import compile as compile_regex
+from inspect import signature, getdoc, Parameter
+from argparse import ArgumentParser
+from contextlib import contextmanager
+from functools import wraps
+from io import IOBase
+from autocommand.errors import AutocommandError
+
+
+_empty = Parameter.empty
+
+
+class AnnotationError(AutocommandError):
+    '''Annotation error: annotation must be a string, type, or tuple of both'''
+
+
+class PositionalArgError(AutocommandError):
+    '''
+    Positional arg error: autocommand can't handle positional-only parameters
+    '''
+
+
+class KWArgError(AutocommandError):
+    '''kwarg Error: autocommand can't handle a **kwargs parameter'''
+
+
+class DocstringError(AutocommandError):
+    '''Docstring error'''
+
+
+class TooManySplitsError(DocstringError):
+    '''
+    The docstring had too many ---- section splits. Currently we only support
+    using up to a single split, to split the docstring into description and
+    epilog parts.
+    '''
+
+
+def _get_type_description(annotation):
+    '''
+    Given an annotation, return the (type, description) for the parameter.
+    If you provide an annotation that is somehow both a string and a callable,
+    the behavior is undefined.
+    '''
+    if annotation is _empty:
+        return None, None
+    elif callable(annotation):
+        return annotation, None
+    elif isinstance(annotation, str):
+        return None, annotation
+    elif isinstance(annotation, tuple):
+        try:
+            arg1, arg2 = annotation
+        except ValueError as e:
+            raise AnnotationError(annotation) from e
+        else:
+            if callable(arg1) and isinstance(arg2, str):
+                return arg1, arg2
+            elif isinstance(arg1, str) and callable(arg2):
+                return arg2, arg1
+
+    raise AnnotationError(annotation)
+
+
+def _add_arguments(param, parser, used_char_args, add_nos):
+    '''
+    Add the argument(s) to an ArgumentParser (using add_argument) for a given
+    parameter. used_char_args is the set of -short options currently already in
+    use, and is updated (if necessary) by this function. If add_nos is True,
+    this will also add an inverse switch for all boolean options. For
+    instance, for the boolean parameter "verbose", this will create --verbose
+    and --no-verbose.
+    '''
+
+    # Impl note: This function is kept separate from make_parser because it's
+    # already very long and I wanted to separate out as much as possible into
+    # its own call scope, to prevent even the possibility of subtle mutation
+    # bugs.
+    if param.kind is param.POSITIONAL_ONLY:
+        raise PositionalArgError(param)
+    elif param.kind is param.VAR_KEYWORD:
+        raise KWArgError(param)
+
+    # These are the kwargs for the add_argument function.
+    arg_spec = {}
+    is_option = False
+
+    # Get the type and description from the annotation.
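+    # For reference (comment added here; derived from _get_type_description
+    # above), the accepted annotation forms are:
+    #   x: int            -> (int, None)           type only
+    #   x: 'help text'    -> (None, 'help text')   description only
+    #   x: (int, 'help')  -> (int, 'help')         both, in either order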
+    arg_type, description = _get_type_description(param.annotation)
+
+    # Get the default value
+    default = param.default
+
+    # If there is no explicit type, and the default is present and not None,
+    # infer the type from the default.
+    if arg_type is None and default not in {_empty, None}:
+        arg_type = type(default)
+
+    # Add default. The presence of a default means this is an option, not an
+    # argument.
+    if default is not _empty:
+        arg_spec['default'] = default
+        is_option = True
+
+    # Add the type
+    if arg_type is not None:
+        # Special case for bool: make it just a --switch
+        if arg_type is bool:
+            if not default or default is _empty:
+                arg_spec['action'] = 'store_true'
+            else:
+                arg_spec['action'] = 'store_false'
+
+            # Switches are always options
+            is_option = True
+
+        # Special case for file types: make it a string type, for filename
+        elif isinstance(default, IOBase):
+            arg_spec['type'] = str
+
+        # TODO: special case for list type.
+        #   - How to specify type of list members?
+        #       - param: [int]
+        #       - param: int =[]
+        #   - action='append' vs nargs='*'
+
+        else:
+            arg_spec['type'] = arg_type
+
+    # nargs: if the signature includes *args, collect them as trailing CLI
+    # arguments in a list. *args can't have a default value, so it can never be
+    # an option.
+    if param.kind is param.VAR_POSITIONAL:
+        # TODO: consider depluralizing metavar/name here.
+        arg_spec['nargs'] = '*'
+
+    # Add description.
+    if description is not None:
+        arg_spec['help'] = description
+
+    # Get the --flags
+    flags = []
+    name = param.name
+
+    if is_option:
+        # Add the first letter as a -short option.
+        for letter in name[0], name[0].swapcase():
+            if letter not in used_char_args:
+                used_char_args.add(letter)
+                flags.append('-{}'.format(letter))
+                break
+
+        # If the parameter is a --long option, or is a -short option that
+        # somehow failed to get a flag, add it.
+        if len(name) > 1 or not flags:
+            flags.append('--{}'.format(name))
+
+        arg_spec['dest'] = name
+    else:
+        flags.append(name)
+
+    parser.add_argument(*flags, **arg_spec)
+
+    # Create the --no- version for boolean switches
+    if add_nos and arg_type is bool:
+        parser.add_argument(
+            '--no-{}'.format(name),
+            action='store_const',
+            dest=name,
+            const=default if default is not _empty else False)
+
+
+def make_parser(func_sig, description, epilog, add_nos):
+    '''
+    Given the signature of a function, create an ArgumentParser
+    '''
+    parser = ArgumentParser(description=description, epilog=epilog)
+
+    used_char_args = {'h'}
+
+    # Arrange the params so that single-character arguments are first. This
+    # ensures they don't have to get --long versions. sorted is stable, so the
+    # parameters will otherwise still be in relative order.
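+    # (Comment added here: the key is False for one-letter names and True
+    # otherwise; False sorts first, so short names claim their -short flags.)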
+    params = sorted(
+        func_sig.parameters.values(),
+        key=lambda param: len(param.name) > 1)
+
+    for param in params:
+        _add_arguments(param, parser, used_char_args, add_nos)
+
+    return parser
+
+
+_DOCSTRING_SPLIT = compile_regex(r'\n\s*-{4,}\s*\n')
+
+
+def parse_docstring(docstring):
+    '''
+    Given a docstring, parse it into a description and epilog part
+    '''
+    if docstring is None:
+        return '', ''
+
+    parts = _DOCSTRING_SPLIT.split(docstring)
+
+    if len(parts) == 1:
+        return docstring, ''
+    elif len(parts) == 2:
+        return parts[0], parts[1]
+    else:
+        raise TooManySplitsError()
+
+
+def autoparse(
+        func=None, *,
+        description=None,
+        epilog=None,
+        add_nos=False,
+        parser=None):
+    '''
+    This decorator converts a function that takes normal arguments into a
+    function which takes a single optional argument, argv, parses it using an
+    argparse.ArgumentParser, and calls the underlying function with the parsed
+    arguments. If argv is not given, sys.argv[1:] is used. This is so that the
+    function can be used as a setuptools entry point, as well as a normal main
+    function. sys.argv[1:] is not evaluated until the function is called, to
+    allow injecting different arguments for testing.
+
+    It uses the argument signature of the function to create an
+    ArgumentParser. Parameters without defaults become positional parameters,
+    while parameters *with* defaults become --options. Use annotations to set
+    the type of the parameter.
+
+    The `description` and `epilog` parameters correspond to the same respective
+    argparse parameters. If no description is given, it defaults to the
+    decorated function's docstring, if present.
+
+    If add_nos is True, every boolean option (that is, every parameter with a
+    default of True/False or a type of bool) will have a --no- version created
+    as well, which inverts the option. For instance, the --verbose option will
+    have a --no-verbose counterpart. These are not mutually exclusive;
+    whichever one appears last in the argument list will have precedence.
+
+    If a parser is given, it is used instead of one generated from the function
+    signature. In this case, no parser is created; instead, the given parser is
+    used to parse the argv argument. The parser's results' argument names must
+    match up with the parameter names of the decorated function.
+
+    The decorated function is attached to the result as the `func` attribute,
+    and the parser is attached as the `parser` attribute.
+    '''
+
+    # If @autoparse(...) is used instead of @autoparse
+    if func is None:
+        return lambda f: autoparse(
+            f, description=description,
+            epilog=epilog,
+            add_nos=add_nos,
+            parser=parser)
+
+    func_sig = signature(func)
+
+    docstr_description, docstr_epilog = parse_docstring(getdoc(func))
+
+    if parser is None:
+        parser = make_parser(
+            func_sig,
+            description or docstr_description,
+            epilog or docstr_epilog,
+            add_nos)
+
+    @wraps(func)
+    def autoparse_wrapper(argv=None):
+        if argv is None:
+            argv = sys.argv[1:]
+
+        # Get empty argument binding, to fill with parsed arguments. This
+        # object does all the heavy lifting of turning named arguments
+        # into correctly bound *args and **kwargs.
+        parsed_args = func_sig.bind_partial()
+        parsed_args.arguments.update(vars(parser.parse_args(argv)))
+
+        return func(*parsed_args.args, **parsed_args.kwargs)
+
+    # TODO: attach an updated __signature__ to autoparse_wrapper, just in case.
+
+    # Attach the wrapped function and parser, and return the wrapper.
+    autoparse_wrapper.func = func
+    autoparse_wrapper.parser = parser
+    return autoparse_wrapper
+
+
+@contextmanager
+def smart_open(filename_or_file, *args, **kwargs):
+    '''
+    This context manager allows you to open a filename, or to pass through an
+    already-existing file object, like sys.stdout, which shouldn't be closed
+    at the end of the context. If the filename argument is a str, bytes,
+    or int, the file object is created via a call to open with the given *args
+    and **kwargs, sent to the context, and closed at the end of the context,
+    just like "with open(filename) as f:". If it isn't one of the openable
+    types, the object is simply sent to the context unchanged, and left
+    unclosed at the end of the context. Example:
+
+        def work_with_file(name=sys.stdout):
+            with smart_open(name) as f:
+                # Works correctly if name is a str filename or sys.stdout
+                print("Some stuff", file=f)
+                # If it was a filename, f is closed at the end here.
+    '''
+    if isinstance(filename_or_file, (str, bytes, int)):
+        with open(filename_or_file, *args, **kwargs) as file:
+            yield file
+    else:
+        yield filename_or_file
diff --git a/lib/autocommand/errors.py b/lib/autocommand/errors.py
new file mode 100644
index 00000000..25706073
--- /dev/null
+++ b/lib/autocommand/errors.py
@@ -0,0 +1,23 @@
+# Copyright 2014-2016 Nathan West
+#
+# This file is part of autocommand.
+#
+# autocommand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# autocommand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with autocommand. If not, see <http://www.gnu.org/licenses/>.
+
+
+class AutocommandError(Exception):
+    '''Base class for autocommand exceptions'''
+    pass
+
+# Individual modules will define errors specific to that module.
diff --git a/lib/cherrypy/_cpdispatch.py b/lib/cherrypy/_cpdispatch.py
index 83eb79cb..5c506e99 100644
--- a/lib/cherrypy/_cpdispatch.py
+++ b/lib/cherrypy/_cpdispatch.py
@@ -206,12 +206,8 @@ except ImportError:
     def test_callable_spec(callable, args, kwargs):  # noqa: F811
         return None
 else:
-    getargspec = inspect.getargspec
-    # Python 3 requires using getfullargspec if
-    # keyword-only arguments are present
-    if hasattr(inspect, 'getfullargspec'):
-        def getargspec(callable):
-            return inspect.getfullargspec(callable)[:4]
+    def getargspec(callable):
+        return inspect.getfullargspec(callable)[:4]
 
 
 class LateParamPageHandler(PageHandler):
diff --git a/lib/cherrypy/_cperror.py b/lib/cherrypy/_cperror.py
index 4e727682..f6ff2913 100644
--- a/lib/cherrypy/_cperror.py
+++ b/lib/cherrypy/_cperror.py
@@ -466,7 +466,7 @@ _HTTPErrorTemplate = '''%(traceback)s
-    Powered by <a href="http://www.cherrypy.org">CherryPy %(version)s</a>
+    Powered by <a href="http://www.cherrypy.dev">CherryPy %(version)s</a>
@@ -532,7 +532,8 @@ def get_error_page(status, **kwargs):
                 return result
             else:
                 # Load the template from this path.
-                template = io.open(error_page, newline='').read()
+                with io.open(error_page, newline='') as f:
+                    template = f.read()
     except Exception:
         e = _format_exception(*_exc_info())[-1]
         m = kwargs['message']
diff --git a/lib/cherrypy/_cpmodpy.py b/lib/cherrypy/_cpmodpy.py
index 0e608c48..a08f0ed9 100644
--- a/lib/cherrypy/_cpmodpy.py
+++ b/lib/cherrypy/_cpmodpy.py
@@ -339,11 +339,8 @@ LoadModule python_module modules/mod_python.so
 }
 
         mpconf = os.path.join(os.path.dirname(__file__), 'cpmodpy.conf')
-        f = open(mpconf, 'wb')
-        try:
+        with open(mpconf, 'wb') as f:
             f.write(conf_data)
-        finally:
-            f.close()
 
         response = read_process(self.apache_path, '-k start -f %s' % mpconf)
         self.ready = True
diff --git a/lib/cherrypy/_cprequest.py b/lib/cherrypy/_cprequest.py
index 9b86bd67..a661112c 100644
--- a/lib/cherrypy/_cprequest.py
+++ b/lib/cherrypy/_cprequest.py
@@ -169,7 +169,7 @@ def request_namespace(k, v):
 def response_namespace(k, v):
     """Attach response attributes declared in config."""
     # Provides config entries to set default response headers
-    # http://cherrypy.org/ticket/889
+    # http://cherrypy.dev/ticket/889
     if k[:8] == 'headers.':
         cherrypy.serving.response.headers[k.split('.', 1)[1]] = v
     else:
@@ -252,7 +252,7 @@ class Request(object):
     The query component of the Request-URI, a string of information to be
     interpreted by the resource. The query portion of a URI follows the
     path component, and is separated by a '?'. For example, the URI
-    'http://www.cherrypy.org/wiki?a=3&b=4' has the query component,
+    'http://www.cherrypy.dev/wiki?a=3&b=4' has the query component,
     'a=3&b=4'."""
 
     query_string_encoding = 'utf8'
@@ -742,6 +742,9 @@ class Request(object):
             if self.protocol >= (1, 1):
                 msg = "HTTP/1.1 requires a 'Host' request header."
                 raise cherrypy.HTTPError(400, msg)
+        else:
+            headers['Host'] = httputil.SanitizedHost(dict.get(headers, 'Host'))
+
         host = dict.get(headers, 'Host')
         if not host:
             host = self.local.name or self.local.ip
diff --git a/lib/cherrypy/lib/auth_digest.py b/lib/cherrypy/lib/auth_digest.py
index fbb5df64..981e9a5d 100644
--- a/lib/cherrypy/lib/auth_digest.py
+++ b/lib/cherrypy/lib/auth_digest.py
@@ -101,13 +101,12 @@ def get_ha1_file_htdigest(filename):
     """
     def get_ha1(realm, username):
         result = None
-        f = open(filename, 'r')
-        for line in f:
-            u, r, ha1 = line.rstrip().split(':')
-            if u == username and r == realm:
-                result = ha1
-                break
-        f.close()
+        with open(filename, 'r') as f:
+            for line in f:
+                u, r, ha1 = line.rstrip().split(':')
+                if u == username and r == realm:
+                    result = ha1
+                    break
         return result
 
     return get_ha1
diff --git a/lib/cherrypy/lib/covercp.py b/lib/cherrypy/lib/covercp.py
index 3e219713..6c3871fc 100644
--- a/lib/cherrypy/lib/covercp.py
+++ b/lib/cherrypy/lib/covercp.py
@@ -334,9 +334,10 @@ class CoverStats(object):
         yield ''
 
     def annotated_file(self, filename, statements, excluded, missing):
-        source = open(filename, 'r')
+        with open(filename, 'r') as source:
+            lines = source.readlines()
         buffer = []
-        for lineno, line in enumerate(source.readlines()):
+        for lineno, line in enumerate(lines):
             lineno += 1
             line = line.strip('\n\r')
             empty_the_buffer = True
diff --git a/lib/cherrypy/lib/httputil.py b/lib/cherrypy/lib/httputil.py
index eedf8d89..ced310a0 100644
--- a/lib/cherrypy/lib/httputil.py
+++ b/lib/cherrypy/lib/httputil.py
@@ -516,3 +516,33 @@ class Host(object):
 
     def __repr__(self):
         return 'httputil.Host(%r, %r, %r)' % (self.ip, self.port, self.name)
+
+
+class SanitizedHost(str):
+    r"""
+    Wraps a raw host header received from the network in
+    a sanitized version that elides dangerous characters.
+
+    >>> SanitizedHost('foo\nbar')
+    'foobar'
+    >>> SanitizedHost('foo\nbar').raw
+    'foo\nbar'
+
+    A SanitizedHost instance is only returned if sanitization was performed.
+
+    >>> isinstance(SanitizedHost('foobar'), SanitizedHost)
+    False
+    """
+    dangerous = re.compile(r'[\n\r]')
+
+    def __new__(cls, raw):
+        sanitized = cls._sanitize(raw)
+        if sanitized == raw:
+            return raw
+        instance = super().__new__(cls, sanitized)
+        instance.raw = raw
+        return instance
+
+    @classmethod
+    def _sanitize(cls, raw):
+        return cls.dangerous.sub('', raw)
diff --git a/lib/cherrypy/lib/reprconf.py b/lib/cherrypy/lib/reprconf.py
index 3976652e..76381d7b 100644
--- a/lib/cherrypy/lib/reprconf.py
+++ b/lib/cherrypy/lib/reprconf.py
@@ -163,11 +163,8 @@ class Parser(configparser.ConfigParser):
             #     fp = open(filename)
             # except IOError:
             #     continue
-            fp = open(filename)
-            try:
+            with open(filename) as fp:
                 self._read(fp, filename)
-            finally:
-                fp.close()
 
     def as_dict(self, raw=False, vars=None):
         """Convert an INI file to a dictionary"""
diff --git a/lib/cherrypy/lib/sessions.py b/lib/cherrypy/lib/sessions.py
index 5b3328f2..0f56a4fa 100644
--- a/lib/cherrypy/lib/sessions.py
+++ b/lib/cherrypy/lib/sessions.py
@@ -516,11 +516,8 @@ class FileSession(Session):
         if path is None:
             path = self._get_file_path()
         try:
-            f = open(path, 'rb')
-            try:
+            with open(path, 'rb') as f:
                 return pickle.load(f)
-            finally:
-                f.close()
         except (IOError, EOFError):
             e = sys.exc_info()[1]
             if self.debug:
@@ -531,11 +528,8 @@ def _save(self, expiration_time):
         assert self.locked, ('The session was saved without being locked. '
                              "Check your tools' priority levels.")
-        f = open(self._get_file_path(), 'wb')
-        try:
+        with open(self._get_file_path(), 'wb') as f:
             pickle.dump((self._data, expiration_time), f, self.pickle_protocol)
-        finally:
-            f.close()
 
     def _delete(self):
         assert self.locked, ('The session was deleted without being locked. '
diff --git a/lib/cherrypy/process/plugins.py b/lib/cherrypy/process/plugins.py
index 2a9952de..e96fb1ce 100644
--- a/lib/cherrypy/process/plugins.py
+++ b/lib/cherrypy/process/plugins.py
@@ -436,7 +436,8 @@ class PIDFile(SimplePlugin):
         if self.finalized:
             self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
         else:
-            open(self.pidfile, 'wb').write(ntob('%s\n' % pid, 'utf8'))
+            with open(self.pidfile, 'wb') as f:
+                f.write(ntob('%s\n' % pid, 'utf8'))
             self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
             self.finalized = True
     start.priority = 70
diff --git a/lib/cherrypy/test/helper.py b/lib/cherrypy/test/helper.py
index c1ca4535..cae49533 100644
--- a/lib/cherrypy/test/helper.py
+++ b/lib/cherrypy/test/helper.py
@@ -505,7 +505,8 @@ server.ssl_private_key: r'%s'
 
     def get_pid(self):
         if self.daemonize:
-            return int(open(self.pid_file, 'rb').read())
+            with open(self.pid_file, 'rb') as f:
+                return int(f.read())
         return self._proc.pid
 
     def join(self):
diff --git a/lib/cherrypy/test/logtest.py b/lib/cherrypy/test/logtest.py
index 344be987..112bdc25 100644
--- a/lib/cherrypy/test/logtest.py
+++ b/lib/cherrypy/test/logtest.py
@@ -97,7 +97,8 @@ class LogCase(object):
 
     def emptyLog(self):
         """Overwrite self.logfile with 0 bytes."""
-        open(self.logfile, 'wb').write('')
+        with open(self.logfile, 'wb') as f:
+            f.write('')
 
     def markLog(self, key=None):
         """Insert a marker line into the log and set self.lastmarker."""
@@ -105,10 +106,11 @@ class LogCase(object):
             key = str(time.time())
         self.lastmarker = key
 
-        open(self.logfile, 'ab+').write(
-            b'%s%s\n'
-            % (self.markerPrefix, key.encode('utf-8'))
-        )
+        with open(self.logfile, 'ab+') as f:
+            f.write(
+                b'%s%s\n'
+                % (self.markerPrefix, key.encode('utf-8'))
+            )
 
     def _read_marked_region(self, marker=None):
         """Return lines from self.logfile in the marked region.
@@ -122,20 +124,23 @@ class LogCase(object):
         logfile = self.logfile
         marker = marker or self.lastmarker
         if marker is None:
-            return open(logfile, 'rb').readlines()
+            with open(logfile, 'rb') as f:
+                return f.readlines()
 
         if isinstance(marker, str):
             marker = marker.encode('utf-8')
 
         data = []
         in_region = False
-        for line in open(logfile, 'rb'):
-            if in_region:
-                if line.startswith(self.markerPrefix) and marker not in line:
-                    break
-                else:
-                    data.append(line)
-            elif marker in line:
-                in_region = True
+        with open(logfile, 'rb') as f:
+            for line in f:
+                if in_region:
+                    if (line.startswith(self.markerPrefix)
+                            and marker not in line):
+                        break
+                    else:
+                        data.append(line)
+                elif marker in line:
+                    in_region = True
         return data
 
     def assertInLog(self, line, marker=None):
diff --git a/lib/cherrypy/test/modfastcgi.py b/lib/cherrypy/test/modfastcgi.py
index 79ec3d18..16927d3c 100644
--- a/lib/cherrypy/test/modfastcgi.py
+++ b/lib/cherrypy/test/modfastcgi.py
@@ -14,7 +14,7 @@ KNOWN BUGS
 1. Apache processes Range headers automatically; CherryPy's truncated
    output is then truncated again by Apache. See test_core.testRanges.
-   This was worked around in http://www.cherrypy.org/changeset/1319.
+   This was worked around in http://www.cherrypy.dev/changeset/1319.
 2. Apache does not allow custom HTTP methods like CONNECT as per the spec.
    See test_core.testHTTPMethods.
 3. Max request header and body settings do not work with Apache.
@@ -112,15 +112,12 @@ class ModFCGISupervisor(helper.LocalWSGISupervisor):
             fcgiconf = os.path.join(curdir, fcgiconf)
 
         # Write the Apache conf file.
-        f = open(fcgiconf, 'wb')
-        try:
+        with open(fcgiconf, 'wb') as f:
             server = repr(os.path.join(curdir, 'fastcgi.pyc'))[1:-1]
             output = self.template % {'port': self.port, 'root': curdir,
                                       'server': server}
             output = output.replace('\r\n', '\n')
             f.write(output)
-        finally:
-            f.close()
 
         result = read_process(APACHE_PATH, '-k start -f %s' % fcgiconf)
         if result:
diff --git a/lib/cherrypy/test/modfcgid.py b/lib/cherrypy/test/modfcgid.py
index d101bd67..6dd48a94 100644
--- a/lib/cherrypy/test/modfcgid.py
+++ b/lib/cherrypy/test/modfcgid.py
@@ -14,7 +14,7 @@ KNOWN BUGS
 1. Apache processes Range headers automatically; CherryPy's truncated
    output is then truncated again by Apache. See test_core.testRanges.
-   This was worked around in http://www.cherrypy.org/changeset/1319.
+   This was worked around in http://www.cherrypy.dev/changeset/1319.
 2. Apache does not allow custom HTTP methods like CONNECT as per the spec.
    See test_core.testHTTPMethods.
 3. Max request header and body settings do not work with Apache.
@@ -101,15 +101,12 @@ class ModFCGISupervisor(helper.LocalSupervisor):
         fcgiconf = os.path.join(curdir, fcgiconf)
 
         # Write the Apache conf file.
-        f = open(fcgiconf, 'wb')
-        try:
+        with open(fcgiconf, 'wb') as f:
             server = repr(os.path.join(curdir, 'fastcgi.pyc'))[1:-1]
             output = self.template % {'port': self.port, 'root': curdir,
                                       'server': server}
             output = ntob(output.replace('\r\n', '\n'))
             f.write(output)
-        finally:
-            f.close()
 
         result = read_process(APACHE_PATH, '-k start -f %s' % fcgiconf)
         if result:
diff --git a/lib/cherrypy/test/modpy.py b/lib/cherrypy/test/modpy.py
index 7c288d2c..f4f68523 100644
--- a/lib/cherrypy/test/modpy.py
+++ b/lib/cherrypy/test/modpy.py
@@ -15,7 +15,7 @@ KNOWN BUGS
 1. Apache processes Range headers automatically; CherryPy's truncated
    output is then truncated again by Apache. See test_core.testRanges.
-   This was worked around in http://www.cherrypy.org/changeset/1319.
+   This was worked around in http://www.cherrypy.dev/changeset/1319.
 2. Apache does not allow custom HTTP methods like CONNECT as per the spec.
    See test_core.testHTTPMethods.
 3. Max request header and body settings do not work with Apache.
@@ -107,13 +107,10 @@ class ModPythonSupervisor(helper.Supervisor):
         if not os.path.isabs(mpconf):
             mpconf = os.path.join(curdir, mpconf)
 
-        f = open(mpconf, 'wb')
-        try:
+        with open(mpconf, 'wb') as f:
             f.write(self.template % {'port': self.port,
                                      'modulename': modulename,
                                      'host': self.host})
-        finally:
-            f.close()
 
         result = read_process(APACHE_PATH, '-k start -f %s' % mpconf)
         if result:
diff --git a/lib/cherrypy/test/modwsgi.py b/lib/cherrypy/test/modwsgi.py
index da7d240b..c3216284 100644
--- a/lib/cherrypy/test/modwsgi.py
+++ b/lib/cherrypy/test/modwsgi.py
@@ -11,7 +11,7 @@ KNOWN BUGS
 1. Apache processes Range headers automatically; CherryPy's truncated
    output is then truncated again by Apache. See test_core.testRanges.
-   This was worked around in http://www.cherrypy.org/changeset/1319.
+   This was worked around in http://www.cherrypy.dev/changeset/1319.
 2. Apache does not allow custom HTTP methods like CONNECT as per the spec.
    See test_core.testHTTPMethods.
 3. Max request header and body settings do not work with Apache.
@@ -109,14 +109,11 @@ class ModWSGISupervisor(helper.Supervisor):
         if not os.path.isabs(mpconf):
             mpconf = os.path.join(curdir, mpconf)
 
-        f = open(mpconf, 'wb')
-        try:
+        with open(mpconf, 'wb') as f:
             output = (self.template %
                       {'port': self.port, 'testmod': modulename,
                        'curdir': curdir})
             f.write(output)
-        finally:
-            f.close()
 
         result = read_process(APACHE_PATH, '-k start -f %s' % mpconf)
         if result:
diff --git a/lib/cherrypy/test/test_auth_basic.py b/lib/cherrypy/test/test_auth_basic.py
index d7e69a9b..f178f8f9 100644
--- a/lib/cherrypy/test/test_auth_basic.py
+++ b/lib/cherrypy/test/test_auth_basic.py
@@ -1,4 +1,4 @@
-# This file is part of CherryPy <http://www.cherrypy.org/>
+# This file is part of CherryPy <http://www.cherrypy.dev/>
 # -*- coding: utf-8 -*-
 # vim:ts=4:sw=4:expandtab:fileencoding=utf-8
diff --git a/lib/cherrypy/test/test_auth_digest.py b/lib/cherrypy/test/test_auth_digest.py
index 745f89e6..4b7b5298 100644
--- a/lib/cherrypy/test/test_auth_digest.py
+++ b/lib/cherrypy/test/test_auth_digest.py
@@ -1,4 +1,4 @@
-# This file is part of CherryPy <http://www.cherrypy.org/>
+# This file is part of CherryPy <http://www.cherrypy.dev/>
 # -*- coding: utf-8 -*-
 # vim:ts=4:sw=4:expandtab:fileencoding=utf-8
diff --git a/lib/cherrypy/test/test_core.py b/lib/cherrypy/test/test_core.py
index 6fde3a97..42460b3f 100644
--- a/lib/cherrypy/test/test_core.py
+++ b/lib/cherrypy/test/test_core.py
@@ -586,9 +586,8 @@ class CoreRequestHandlingTest(helper.CPWebCase):
     def testFavicon(self):
         # favicon.ico is served by staticfile.
         icofilename = os.path.join(localDir, '../favicon.ico')
-        icofile = open(icofilename, 'rb')
-        data = icofile.read()
-        icofile.close()
+        with open(icofilename, 'rb') as icofile:
+            data = icofile.read()
 
         self.getPage('/favicon.ico')
         self.assertBody(data)
diff --git a/lib/cherrypy/test/test_encoding.py b/lib/cherrypy/test/test_encoding.py
index 882d7a5b..6075103d 100644
--- a/lib/cherrypy/test/test_encoding.py
+++ b/lib/cherrypy/test/test_encoding.py
@@ -46,7 +46,7 @@ class EncodingTests(helper.CPWebCase):
             # any part which is unicode (even ascii), the response
             # should not fail.
             cherrypy.response.cookie['candy'] = 'bar'
-            cherrypy.response.cookie['candy']['domain'] = 'cherrypy.org'
+            cherrypy.response.cookie['candy']['domain'] = 'cherrypy.dev'
             cherrypy.response.headers[
                 'Some-Header'] = 'My d\xc3\xb6g has fleas'
             cherrypy.response.headers[
diff --git a/lib/cherrypy/test/test_logging.py b/lib/cherrypy/test/test_logging.py
index 5308fb72..2d4aa56f 100644
--- a/lib/cherrypy/test/test_logging.py
+++ b/lib/cherrypy/test/test_logging.py
@@ -113,7 +113,7 @@ def test_normal_return(log_tracker, server):
     resp = requests.get(
         'http://%s:%s/as_string' % (host, port),
         headers={
-            'Referer': 'http://www.cherrypy.org/',
+            'Referer': 'http://www.cherrypy.dev/',
             'User-Agent': 'Mozilla/5.0',
         },
     )
@@ -135,7 +135,7 @@ def test_normal_return(log_tracker, server):
     log_tracker.assertLog(
         -1,
         '] "GET /as_string HTTP/1.1" 200 %s '
-        '"http://www.cherrypy.org/" "Mozilla/5.0"'
+        '"http://www.cherrypy.dev/" "Mozilla/5.0"'
         % content_length,
     )
diff --git a/lib/cherrypy/test/test_request_obj.py b/lib/cherrypy/test/test_request_obj.py
index 31023e8f..2478aabe 100644
--- a/lib/cherrypy/test/test_request_obj.py
+++ b/lib/cherrypy/test/test_request_obj.py
@@ -342,7 +342,7 @@ class RequestObjectTests(helper.CPWebCase):
         self.assertBody('/pathinfo/foo/bar')
 
     def testAbsoluteURIPathInfo(self):
-        # http://cherrypy.org/ticket/1061
+        # http://cherrypy.dev/ticket/1061
         self.getPage('http://localhost/pathinfo/foo/bar')
         self.assertBody('/pathinfo/foo/bar')
 
@@ -375,10 +375,10 @@ class RequestObjectTests(helper.CPWebCase):
         # Make sure that encoded = and & get parsed correctly
         self.getPage(
-            '/params/code?url=http%3A//cherrypy.org/index%3Fa%3D1%26b%3D2')
+            '/params/code?url=http%3A//cherrypy.dev/index%3Fa%3D1%26b%3D2')
         self.assertBody('args: %s kwargs: %s' %
                         (('code',),
-                         [('url', ntou('http://cherrypy.org/index?a=1&b=2'))]))
+                         [('url', ntou('http://cherrypy.dev/index?a=1&b=2'))]))
 
         # Test coordinates sent by <img ismap>
         self.getPage('/params/ismap?223,114')
@@ -756,6 +756,16 @@ class RequestObjectTests(helper.CPWebCase):
                      headers=[('Content-type', 'application/json')])
         self.assertBody('application/json')
 
+    def test_dangerous_host(self):
+        """
+        Dangerous characters like newlines should be elided.
+        Ref #1974.
+        """
+        # foo\nbar
+        encoded = '=?iso-8859-1?q?foo=0Abar?='
+        self.getPage('/headers/Host', headers=[('Host', encoded)])
+        self.assertBody('foobar')
+
     def test_basic_HTTPMethods(self):
         helper.webtest.methods_with_bodies = ('POST', 'PUT', 'PROPFIND',
                                               'PATCH')
diff --git a/lib/cherrypy/test/test_states.py b/lib/cherrypy/test/test_states.py
index 28dd6510..d59a4d87 100644
--- a/lib/cherrypy/test/test_states.py
+++ b/lib/cherrypy/test/test_states.py
@@ -424,11 +424,12 @@ test_case_name: "test_signal_handler_unsubscribe"
     p.join()
 
     # Assert the old handler ran.
-    log_lines = list(open(p.error_log, 'rb'))
-    assert any(
-        line.endswith(b'I am an old SIGTERM handler.\n')
-        for line in log_lines
-    )
+    with open(p.error_log, 'rb') as f:
+        log_lines = list(f)
+    assert any(
+        line.endswith(b'I am an old SIGTERM handler.\n')
+        for line in log_lines
+    )
 
 
 def test_safe_wait_INADDR_ANY():  # pylint: disable=invalid-name
diff --git a/lib/cherrypy/test/test_tutorials.py b/lib/cherrypy/test/test_tutorials.py
index 39ca4d6f..390caac8 100644
--- a/lib/cherrypy/test/test_tutorials.py
+++ b/lib/cherrypy/test/test_tutorials.py
@@ -78,7 +78,7 @@ class TutorialTest(helper.CPWebCase):
-            <li><a href="http://www.cherrypy.org">CherryPy</a></li>
+            <li><a href="http://www.cherrypy.dev">CherryPy</a></li>
             </ul>
 
             <p>[<a href="../">Return to links page</a>]</p>
         '''
@@ -166,7 +166,7 @@ class TutorialTest(helper.CPWebCase):
         self.assertHeader('Content-Disposition',
                           # Make sure the filename is quoted.
                           'attachment; filename="pdf_file.pdf"')
-        self.assertEqual(len(self.body), 85698)
+        self.assertEqual(len(self.body), 11961)
 
     def test10HTTPErrors(self):
         self.setup_tutorial('tut10_http_errors', 'HTTPErrorDemo')
diff --git a/lib/cherrypy/tutorial/pdf_file.pdf b/lib/cherrypy/tutorial/pdf_file.pdf
index 38b4f15eabdd65d4a674cb32034361245aa7b97e..226dfe18f73406a8f56c1a4b3508ca5e3601853f 100644
GIT binary patch
literal 11961
[binary patch data omitted]
z#DhF2{%Ii(E!n@(68H)+Aq5&i{i*Lmv$Z=0#r(%^sRZ?ILp<%wgYI+k@(*hV70Xue zzIg^Wa0f^z@21E&km10W6etE``Bh5|SqQrJ6RVxH%U@80T;o|`&i_(tYT$i<@5g)p~%Fm@g`L%1S zR#12Pwz+os)g<|_Ps8Y30Yao#Y9z1k$mdoR|lJ*0J!YIoe zWsu|5`33~U8BUFs==x9+&1+>!(3xR?9^&Zkh$zNMGdSW~cWg!f@Dn_b?I+vhxMjyx=zy2+;)& z!XU5fzPL(TAE2T@I+c#IKYpeOj40RoC%j$@_3J;me}8Rah73BRr{SD zwj+Dvavi2#!Vgq#W5I9U6~b6AHR@trM6g&)M= zTSPw{ElnUL*OO-hW(`l#@$gl-fJ)DeQ0i_d6-ZwTkde@zBhDj^#=AO_l|n&W)`oJh z+dzgezw=YwhbXc~P`v}9B6U87HngoIX9QFYdy%|6Xl4)GYfL;DjU=y2(~X@7RqdiK zN1eKcVHh#=|G=nl$1(Q~-yIz#R5x`rJ3*rf(RXC53_ErctJgYiDl3eILD$v)!}dhE zr3Sd=(!}UaVR0{%n07stw*Sk)EG)<7jbwYIZ4xP4$ItJ?SDK7J3DFzk{Ey$wZKpyX!&sKx z*hin$>Ctp}ABNpj7Oye0vQkt$xkK)p@)U-b$9a`biq-;sX6<0o5vpffJi}HS(d_Tb zGb;>kTovT5Zct2cpHy!T7~a$keM+tkDU-&|cr#{ke1TQ>H!nPw=p1pirPARZ`y3vsyIADhLr#n3A@37xilqr@*}LX;eOcUW<}y zS4V8<4l&>}!Ozf(rFp8Uy>W!&5~|Px)`XnTjO4>mDqgTkh!e4^XGO*aL9K-ml^pyB zuRlTM%${cue}f-9gio}bF)d*=;s^rlbPTvbpQc+5qKdpSLsl=7sAOBOGkaQn&Goj&1xrh$Fvwl^6 zEEIYb%KLCx7-M7g$cuXR`Xz#)iyiTW3Qyy?53xDQ`y`6(w#1mb)^RiFkShViMBGRU~Qy~j-HI>QxPP=K|EDAX|W`Eidtr9({%aLJkT|2&h;L@Nl=5;r8 zna$glvy=Nw$b3aKuqw!9!W6!0f*)W3NzEG;WL&s|H!IhX+gdJv9y2&X8_eSgE8K}Q z&!)8Jg^eaum4))*e~e5K8B%@7z7nP0&t%`|`S~2wh?dlKzac-&+vp+BcO%uhe6TzFp8ZV%ZNb90G}`n2ba(2HR(=R9G^FS^XcQ@k>67}>DENge9hFz zlIT0#hNxEduAcV}G-t;)VS+dO6k#mT2tUuJ^j9CGMii(5KG+a-|BQIOqHY>y_WB=F z-;>?Jm2^2y6M*RGvNfI8l`4ZY8fPiHGjLNx2Ss<47%#~BOEUx0_@Cdg@q8&4{=Mc2 zU$$36_D)#;ZSJ07Qw<2I@6aVw3cDLeJ6qV8w0M=!BzuLZ=7OlMV-WzSvf|G;v(>GT z<%6tV)8lY>i~1v3PB2KBx0NC|G*^snxOwsTo`or!DGm?8?i*Vz=@?6)b@r~xa$G11 z{HV-+bQr*TZ8_$W15K>5EA5;o(XGc#Cvvdj_90+Qd5OQ|>XM35aANGLR1eIqKA!~o zDgm{|Espt-_%v*NmD^GKyY{Fs=hM>N#wY69Xp~iEZ{|c|ytk$YFj#fWKHNbYZ|37( zehTaFDf55r#UYPV=U=icbudlL+NhUbAvPOCK`%nMV9?@vn9d|J)5<|3O@fZ*@h&zW zmlLlre*fMTF{E%j{X2Yg)H-gQrMjVC)>13c0T`tN zev(qdrEDDBcbp*d_m@=jk18y%ZI+p&lEZ4%Jd!+Co-p~6rLco(L$@L&EygaGscj9( zG)h4u?xYwq-PD+J%yG!i(nHu;B$owwsh_!7r`H-*$kBqPLStc!RI1Q$O|{I(VmhWr zg2g&*h@c_@YopBh`7&OrcP(NLhJuNDlrFi@zPfs}2I z=De__ix-rFb}p%&ZJktHbBF8a8%kZo2R4I7iVP(6j)lKXlC`>aa~bJZH{;@EPIs;F zeG;}c>U~}~$^Si-tUSI-lGIO2%UNiB=H zow2;5Gl6w=W%910WJZ{aBl(w1U(4zgR`>=<>`_5x7ksA~)nE0OAlmdIzqEV2maH@uaEq3Ko8oT);Mp_g7r)A)Y%4yEh+i|D1Q|-d z?d8!GkGRU#uoTCi$i+-I2}!+qBYAQ23>E3QJu<7k{S0$1w>Ig~9FcY;BzP!Wy7ql$ z7gYg;%2r5NXdPJtW(B-KM24(IA?Y@e`PNg_qE z*3i*MoAI6F(m$P{l1_UQw3x$L3$NU+eK?oORA6vAJWMA zSm3P-;T@Z8y8#JHvSP?PnsKEZ0zJnny9eZ>34bFA7CNjMjT3`sM!9jyu|DgdJ)r|9 zFX4iNtzOBp2p_1s{Z^gN_XcLr=GMa0vI}>ITs$5pndXnbvh?vSJd_1WVsZUO5a5ez znT4!$zYd-5Y!PP=EurhNgw(zZtKy}}HIt+ZI%V+IU37F$QzFLo#Iwu9YQbq9tN-lmpdNY(JAzSp&M z7OdJGxoSl^y%v{>bbKe5=eX$F=DV;elNopV<1%8iEj0f_Ajan)WTb_mE zlH2WD_#7Qy93P*`F%V##TXJEXD`&^yx)-G`cz*c_(9rhybIrBr{|K(I3YxyO^OF-Y zk_G7-{)*5KdSm>liCI^KqdFxaPO{s=tfpfQ#u&NoE~98$XN03ys3t_GYR*7^;r%w> zJ+riK11rA~6RIkpC0V>YPL`X&w4zRANvvOFz=8S2Il(MkiJLGwzT zp$G<7SThQ){PNVT|(3qGE3?XVl9B28$pa-(B>Ke&_`dXYT62$mACD7_I~7 zRjwE=M_D?E=JBoQ+o%>1MkYv_vk7vqmpXqZ6Ybr1TSurZw65?vnkYi{H-s|cVm1-Z z8hh`}%_(Na(_$C(6eupzMmUY!%3q4I$pz!5=Tp}q?NQP3eD5E=mW|jMlfi;|o;Z?v zsMJSQ?+p4LG>5rawCte!9{a@w>6lH$W6CgzXp!Q@D6z zfKA`vjtLQkx=M+6F%`Df4k40ww+l&X_}N~g_RROG6)lN)B!MoM?*42)nUna42;EZ1 zd{~q+n^xcl)kI|USN4c^{rg~J7m)uxdkUBODfD|Ydj#zkYuq*J>e>*j_n)f;TNYxG zm8|^(bE2KULnb!)+Ip~ahWy%^mQM^M-41i}Wa546y;MaFGI;Yu9NJv7jEt?%u7ms0 z(Pzxs)O?eD^6-u6G-Kt9g5}<-f^=q6>Jt}7U>~Vd0XJ)J?`C}6ef&lYyn>5-JPo*tyhoI;x^MA23&S(y0S6h z3DCY&+l>i|>h*t0$9T*1*&?Z!*d18EQ^e~Y-;ho%%mduW-#iV@!<2 zIhJYAiYtajDmaw%0opvEj2kyTPON9P@xn9NmmJb#1pkO&W|Sye(8Vc~7EtQ z{)|&h=DGFp?$>XbBz?j8nItqz);Y=0@c6ySq{{uAjtOPLN+t6uD0X=AEN<)e*@#&t z=h^baFBc-0Q8JGVyTt>{Y=am_zT=hc*>-tRdNngv&Q~5_-fz>t9SB*_S99iXKLP!B zrR~D2Tfe>MfcJbn#HMg< 
zaa+*~{N3@xgIe7DZ(~^yJz*#=zDLvn;_({~rjPRNHnr*WAMQNh%Egc*?}$jWGz)uq z+W11tS!KSN#YQ%PR5oxECzwQ_yd?3#IFXG5q?qm`)I~JujL!GWsR-*spX6Ek)&tHr zE9^^Yq(!>Eyrt%C*83r6qJL@B$%?+vj@#X3TZg?Shl5}o3@(oq+rt0hjT-e>?(86X z{m}8WFLGm^#a8qKiYjKd{H~^1!(i%R^vB4!;fJMST}{jqG^zWZphqRe7`r}SD3Y#q z(5GWlm@zwG`~qcydjiI2MT0h`;upz9h5ZPez4G6hMR!zF=)9p{KyX>< z@{BTNmTD)y>4caQY)xPbkx#sIB@DyYZRy-(QbUn{rg5EN z4N6Ksz02_A+NufZW0s#|i&5FxsamB!eEkrQNp8G6Z0t(~-84+*sgl%65P*H~y=AKm z-9MCr={reAdAiTQNSIKgETq+L=tBEzqs$_g6-f$%`NvCNWJ*?G2^-OUv%M$bWcIh! z4&0CGz6g^w;R`qel?RbUtl^kF2`UJ?wX!moD{VtQ*H&oEk>L9W1CBC4X3E&9wLAY5 z+5%k)se$MUbCmlU@<>qmi(XOPFM(2u)lC|Nn&qWcu9~flO6?zShXHOS0!a?%F>7Hg zp3Slg3KnEH4bcQoPAmiLElH1JZ5D}w_;B~-USSw63S(l-4Lq4!GQ>+t>E$OEy!-TJ zG6m#T7jwfqTdN;ua_!rpRGZ#ObqU#>7e=mrdXFUaaVRWqN?k@znA7?sv1No#LYG$E z(mdV_9rZFx`3xh`Svl8Z%$0{Y9v6vg9BMo)>NW&4KZBr3_?}wRHgA-(vO1yQPD>j< z-Nu{DKBz5&n3iOSW#)L0ZR5LT;r*Y~T-~FUK@ititI9h7S0(>eWM=S|qFRco~5H=FEO+ z(I4rz6a&LYAD?}cm^K+recEGD6XLJ*=lk9>OO$Jb^T^f-6#1p-TctE%am1<>>8>8J z&=RG6JdIvV5?{S~{X|e@>x+SfPrw3wHYnJ8rb^*ey@0j`v{ko0&z(ToTYh@xZCa#d z`B={fp(F|(yWts8%j?tR!jxnh`F!@tV%nX^YwS#MSOaO&h7p_<6=&qM{hi>O<1u_| zNJ$Z$o`c8FN-qP{&*d*mngT_iEen)L79+|x=Oz}$Q`;pXE_wD=Oa{vpcAr83`{wXj zDN2jN0mGy#akJlbIW(3_Y{5v+Qn)GX_UpQ%=zA{nNcYHMTy$$|hi?94m@J+a1 zJ+=+hv6UW=Va2LKpuLmxOB<%o*91LkCyr`ai(AVt)H(34KLdCXoxGpzR3C4>(Wtr8 zCcPuf8_k6NY6YUgddsvhL-*Tb)?T^0q-uy|G1GF>aXQXn9{>wh7YG$P#u~J5+F%Ze zMhcC(qFoM*#Jdf?JCaq*u<4#e;q?eHnONhJU?a#7GpqTsblDw77w>DfA}x5{m~EWa zk{4jDT*EHVPK-OoU>n2$z_}(87vQ5y-Co3s(!y+P5|#&vjRZLEkI0r6^bib~3%fQ3 zF6CIw2u073fLAi>&pDOFjigkQp*oie*NdpdSY}qa1h5N4!#cdtHPUx{Ix)ZMRD7YzNJ7NpQV6+Y{U-8*mQRF>T-nUpwLZ1=**AKW zifvYDjO(!);9n8k|A;`HU%}pr){f&j&L^D6pegVjO&0 zo#%SI5G8i_XX&WTpFu_=tOs^|b=jmP*wH1W{hFqRhDlhd(Dr~ z2*z}KO!c&mkaZJo+w-8<%{ktlgGd5JN|pylC=!P)7KkWHiGqZ`eX~da>-#8*%jwH# z#;p#LSq1(-06sv$zv1Y2B0+{JFtE}O_VlrZ3IwPv#EtD>iEH9t^YUYUUTwTd_ z(x=z#sNzJ1Wa&Ttp&%zRs$=C#TYn4Tgf2eWcjN+aL$31N|n<_ZC#3_9KYk76m-A8-& zJiI&3J+t&P7(l6+nx|iJaEk+IsZtA0PLIOJ^!$Rd7Bdq)aN8hagURV$_?j1lsyw}8 zC_K^CZ@7I-T#8cLh@N38?wY-}E;e6*&MSdRXblGNaaK};#w(j(WffTr00i$sakDSw zy^6HDXz@b8|Bjmk)2`qYN<{B{^+hMO&^rSxG|rgA_~+%8n=>I0(gzWuh}coG3$mC^ zT?vmA4ZO!hELA@uy6d-Hf$_c*KE6z&<>(%1Ukidq67xL|lJoLABQBZZtrB{8}S3 zz)t!_sGUNekY_!sRDKjMJmD|fR@f?-k0nZsnD~wo9B3HvuLhG6N3Hq*PhXuN^1v8j z?T@HCN1uoE6;oW}ZTW<(3i+`6zId`aLKv|y&B*}Whx%U$`B+fM{r(N+tfy8}M|EYq zxJVeGi*IzhZRf(FM~%-WRU&~B7fm)j96LtsCss44EUQ<@!) 
z^RiUke-J*gF^3o2qD3S;eX}P@nJ{HA%kym`ZhJGLUzIw}L7@?iW}uC&v_Z{m7ib-v z$>2y!oY^6^7peCb*-V4>ImD_*MM7~vED6{y5$D)DFCcGv@eQtfhrSqKnN|p<7rp+r zGvgKmz_TOBc@7|*Gk?<5jaL%f+Q@R$HW?RHKP0F&R750E6Wp3Ye- zXgteVL6p*8P>$bZv9oIDG#WNN%Kxh_d&QDhR(w@j6Efi!Df37^NMWw-ZI1LFy-J2N zuv)o2g*n}}PfyM73pQ(mAaV(TUU>R)6kJtc+;VSCK}aIa%}K?NMDTK=yQ(UxKfs$K zx~=e(-ah||s=A-_i@a4H)AP`Q#{_43dn0FJ;kKNbD;Pn)tK`W1UGZf&%zCBKOj+r4 z*iUK$!_T&}`3cdu5$BnsmgU5D@XKm&;B`OhvgxTBssKN`1(z{nz!3Gpu+u5&#p2Ky zW~S6!zZgGagd@)Z4lnyh97X$PL?Ctk?HD^X8*;0{ zQO{QTkN(hIDAapX#e_rpB2N5cZAguA@0@{a$#lGi1$}q#vefb~vpx`=TdkJ4F7Qij zzvH66Y>Z-1O(^oWq!g}M6cu$e8W)~x_h3%kLeroU-o{B>h;F-~%#sk11OZ3Q~Q2MD7{$Btp zK-Is1`#Gte>`Z<^(!em@ttju+7%6DMILKL7bctM##0f;`*bQ6R20i$WVC>_kD+3Z1 zYUq%~(RFN`8)W9MtgoBtnsfwuymhcgS0=0txh~40&Vd+O_y&IrY*Y4$saY4E1M8sb zXG!f%Rj(ko;v6Vyu=kE;4uplTUDSlK_o^}}{BJRhI?kj&Yipt+!IocF^a)gG)iYOI z%edk~LdBmlD32p+f4$43Z9Wb`{mRN>g5iKq+XUNdeI#b*{5aW4$OB9r*~s5#n&rBs zX(laIH~p4b#61E4SSEAI4w`6hdRW&dyX?v`C=InI57`&T=`ZJ!gz{Q`T-qV$*M)_` zR!#(^6xG<Y7+5$={WETsx{%_BKoA#D1Rds2glsmsS3R5YMq?v%?kig zLOGpXG!st@SiEXBkn4ts0Dsr$PD7FVxgevkf;nB~>lW{L)L%jmH@Jc+r&bvq_`tna z^!ITyZJ1o1&pqk3^gpH^kfOKVc_`j%dU!MkSw~NWGSh1^nmJ99SpHs4?Hh;qq(9|9 z5Dnug@(YnbjGA6Bs=eWgTKE(D z?dwW@;;I)zvfIzxCL&yN10sLNbFoJtDM?mqr^p{#(=Gly=Y!F3jC-4mn?rL1EPRj9 z-U9j0Lh?A|wHlFrOJmr>8ph#|+zz26pD}gjS!9}^^3w<3hW(^1fTO)em)ct!1=B{e z`tH)m(jUjFN4h>L?cM-YoRo%0>Cv~l7UmnMdrk<-<(`9)c>wQ0faKRR(BK?Y>aX9m zQBO03itEsqAoll1Y(ozY+V;jRgA|(swG%ER9CCA_^CC0C62|)Imp;jlhqvP?sq1Gp ztp(=e1^3g06IH+VpS|+U{6Wb=yeWFUKR3F|QxKhFiAU0nxp#B6{S6z_u@Xk3iXqzs zKyD9}hjd$-E9vXZ@dZjyT&pJqh#-*FI4^IRWRE)mo?=H7wfnL8;K3~H-eJtK zn9k#R#L@ZQaMe2vNijXn?_{a9EpB{t=UQ(l_%EVa0+sdMGYI1nm4wFB5cn(m8m@cp< zR@+A<3yFV2Z_)?ph=?XA?0+fdau+=3z6URB3gw;P-5UtdgsiO^w|BUE^8kYZm0PD7 z7}nfp*60n_uA-l66=hS?7QG>gdizWMi|8gsv*I< zm+G(?==$D9h=Eg}b1lfjT(E|5Lm-qJrK=o*#$Sx5z~t~jRW1Ls&tcp9ro0E-ZT|!z zyRZVJGQnnhWe!}H_k3+?h|3YO0LYm)2q85nst1k8s;~h@^HtR)?Zh~7w{CZ}zB}uu zWM^%^{l)>-`XO88GuHl*iU!cezc1U;kBYSA#XdiSjb~$&VP>-mZmhX{^XqJ%Oe!4- zUL&d~;Cn<(2atH8Cp(Syv!r?xTZ+7aH`=<9I$l?RkmSN+CTCT!d>)p>Scmbeh%|zz zlB`@_ezp>?jd9(!qE;gWiFw8N(bf8w;Y-7hg)>U0e33L=NgerTWV;mdXBu1~86q!( zh^E`3XHxVbuIu~O#B&szZy{No7NSS(L9S^9a}i%SRQRjjx>X`PR*;PV84O1l+mZ&h z#1&|Z21^(=lK2iSDmJA><{vDIu%D8Gbkx55u0;&p>jnp!yec{Vmk!cWoL&gfO944A zM|PJ|U<_Tc<4^lAMe&_|ZnMO~!c|#QAFeoAg^V#SwT@9Ujv+LZs8AT9EI(|P2e;)J zyA;67^b&J-X>LnrHzu!#vi@S8o4)}v6F{+NXkt4FB>aekcXHb zYw1ILnkbPv%Zec>R5nn4aMSCEv9Pwz#Q^`GcE*N34dx~>16nH_*pOE8AK`>Lucr7^ zC5_#=*S21TQsMXD=1ESsXfy{&iQVE8W0rX8ko)}@b{B4K`Zv^Kj0+a_Pu*oqg z{b40EH>Kps7T4p5t82KCI&~^LmE1+r!uS6#!bLU;{t9tpL8a=6#$X>%K!mN9|B0Ym zVO0M|#+qV|doD!wj8fCRhsZjr^9b$R8(r#Dshv1)QLDBj{&@Gs$h&Htd{@EVA(<~@ z0-ekN>g364!31OXA|W)g%LZ6FQTY1L>CiivQT#MP#US1RKbMDc_YA?LQWC__v_I2p zQh*E8`=>?F<0`BwHgh4hu`Y($NG+>jRguW(B+rkGNVkfBIN^E~6=>zu>qWdj4@7UK z9H}#EQpS-Vm>j7^2|Y)CvGq3%(bCm)KNS~fVa;ReRvQ`Sl;`1M`oNS?eKLpYg`|i~ zMOB(jDU*H}irQpY7Z|Pe`)rUKY0>Y3-G2Ron8m+oKQ06qfm}ItN5tOHy(~&9;{EG^ z#PLD(Mq9Rg{nQ72qn#F1MCquXDH}geoYwTEMst_77uHE%Yu2iC2pCKfY2;mQS&?Oi zwBMT=dNlc56sO#TsJ>{p#iu)Jh|BX;yJ_n4-9ufa4=XC_sNiuf`RbjmPW8B9BFzR4 z)%cTTt7z~xZOa0$=%r^oU*pIC@VcXv1`@9i@4}hSqpi8$8j3L@k3G&hrGr-hCzMzG zZ;G@sdXrGr3%ucT#2h9y>Mz1XCBJ!Uias|m<87SO63RfHDt)_nzt~(?Bn&VOh7gbw zA-eu608ou3;u1MYdHn>;8u4TYE0T|*LxE!sOp`)u7A@`lYRF!j37~g-@2e#Jl^Im< z8psMng`|B=<~>!Dsi-$t3B|bCBe6ge^;J*Hy-qGTtjD3V*PT>-cK+V)u>f?_F!cQ{ zpX?*iuEO2S!cFYdl}S&*_zcgDz29{804JGg8j-BBZ)+uJNfX2_AMT=^xHBAKJinfu zN)rKJMteltD5*jC+VIHHEJNtV`DR^yY5v)A3Du@{-UCJE3T`gK%qMdWu>o&W--A&nPeSBS5YeBED>y`zwMNfLi^34>yB&=XuCVb-(FT za93!^C_2fB>&H&C%KoZSX*HKOX0GNq*o}oB!a!3=KE!0LAXc%P+#dD6f>o2OS2raXqfh-|3d&A(~8PhfvzP? 
z?=qxu>lGm|Q&}1)5`@5(+Dnh2U+nQ!ub(f!OKWprgcGG@U6Ctd}*@aFwp4H1)*uI36}s%8g!EOEF8^NwaeLP zka;EKD+kZ3Wfs5jCm#6kS7R{_TO8XJ=+MCT*g`jL+f{$4!?FuOwDDL%|a6rqEs($QNZk$1K(Ba>GG%kUp7P{{up_f#2p%H(w zmu4c-xXiJ>7pUJZ9f2!Aw_Cz&=)22pDI$_0ZCDU5YIaNn(Av+bsr;CUlDUrC6r|c3 zY4KKEe^676Iqzbqpzy(?6n*60JZzXCw2N?i^76g5`ZC0$41YMG!#q?!nV0v5oZqwr zQy#JGLJDyjqQ{VtPavC~iIOm^Rd2MInI%1})q$Qx^{{)UcOrSd?^!9y8T@g_2ZVd@W%ggCeN!BvkgZTjv|Y zSmwRgB)zTw1rLo1({y>$(i5C}4881G0v9Wy-@XO-vE^-8c&}!8HGeTWUd_K1bi3K5 zw%!RfU;bm&!a$AYt_hx8A-{VQ3w;UNvzA_~Wh(#->r9LDf~kM7aqq>d=kVbSr3NRO zsxWrUH2-XHP>Y_@CE3=SWfABio)dt`sAZ(n`WQJ?vwr^BgDEr-ctWi&D<903*7DI@ zezE%&D1>5{NO*EsTo*J?$7~Z^n;WJdYYW*tfK74^7#u%jm~Wl)&9Krs?@J|Uze(j3`T<)x+!y>qB$I+R8%fkp43Y+UVlBz z#&WFw%18!awG`rzKD)a24|;dkDE1JH5Jf~?tAJ{O8=JW}vvDf6TRx_-aYmQZxoyj5 z!Yqk-RyeuS?zOM0=AZPrvUO8cW{TqiDKJ9&!IHPspIqfCa;aj8O1dk%xwU8N)LX1B zB*vBt_wKZX5qgU5!H}QaGl=!Cg+^Ov*fgLomf=(HTkYst>=S*jc4?~*H59b0>U_(y z`P$o!5V@!a<)+Tpm0hkWWSTmRcMr_e%CZ6JD3@12YqCWZob#hKH-lSyd^aCBZ8#h` z){8p}pu|^|GkuChYV`ty=baFSJXbcZr>~OCX<4F7f~O9fhtlEX!&b@#!uLSNEl+<@ zUVzkOBQNQ}mR+Dm8eA9oC^l*M;Hq3*<)iTmzP*h{GumkQ$n}k{zwTdXD z{|@;z0lO)Z?O}1S;$O!}ZkUunkQ@xzX_(TymtVehssNQd+&ooTjGe0XPCvMjQ;Cjr zm*OVyDk>8k&!-|LY7^dxU2kf}bjy;~oqHSsHci~?K4Ii!KvMQ*woN$aJj;;&W(^3sKB0NGlX^M;cS7pS;vr~#~g1xCq*J%E=mbgP z65lTmkoVy~_>zoDq7T9*I(m+gS0SHz&=AmCiESNzf|Cs&-Wh*@|1BzhQ%B-N0M>5@jcL}2A=t4sQ;$SOLywo{QAPCTMl)n zbgQX_T-N)auSQ^^$QtLJ_{ve~+v1i_1qA(H@vAN>NMN^M{_0-(<~hU8r`Ut{A;m3fV;m?_f(w9rAnLc~*2|f3 z?Z>%=Gf}fvc1TE^t^M8Xe#~QAoIt?RBmWTo4kBEL8K&9nd-@v@0>84iUz<@Ey={I7 zY^Q!UYb(}yM#{_SP>8S@$RUT549Wj1L+@qckVG%~Fk0-D4{{%{H0Tl_?bq=K9aL?v zk*63=5cT>9sWhTJpuYxWYOGGO_^I{$qnqv2F&IRK%#2c|KH%hB>S`#??A1}#X;dEq z?X-LB7&F-|*L2|>y7>S+Y}3r9ZFUrg)Vdnces!Tf?iVU#0Sr~)NJHvo0+0~Pon%7@ zG=_k~2zLqp_DrQ*gl@Cy71p5C_L0Flp?|xIS)y^=&zLWCY z@-`K@$fLJ59r-OK6!q|pT zqtlD|!7ev9Z$1P-VDiwF;qn`llJ9_W4&qqc(P?5q+BIJ+k*HEodAWT6b`49iH2Gdp zbs$lKRBRHZ_G@Lc{gvM^b%@ZV00?0$VWh2IDN1iWnCrcZDg-a9|463=o|pW7daEGu z`k$PCa!PmKf5kphQ9SY4+?C3E!dUT8r|^{L+mY#!2t@+%abC5zh0?2NW2%Xl0xS3y zQcnPZ1{Q$AJ%b0Db-c!{o2*xlChD3?)J~tx(Eox8LPQJl@+oP6OZO2Wt|R4V{A)b@c9%ZDC*KlCc7=26zR?JD0L@=aSr8*L#(x^#3uoXk zyV4LgHwl)mqX4?O4kXc*Wr}t>j>IU=;JHWAiB7+=vqPc7HNI%)l7_&2}9j4J>|8gvHo@s%+E3e^JB(IlI>vx;WK3 zpfVQ}DVx3?JD(KzW@iU|7hmmj@A~~F7iQ;$;ckb7@R{9Fa0Bh{VyZ)t9_!&zJcdeQ zDpJNo2=v@D1sv60P*9Je&sL2+W-+I7&=KJQRL4Dea1piWCm{_RF|=359j9d5Vm%%T z^f$ObS$6hB);uHK+JpK{6xQK`l39P{p+(%-68)_;y>D^P5t14JA84>ccj?;L>Au6? z8LtRgxgnX^u^^FAgJWpXq9Iyx>kQe8Y^uO0aARp${EDu8tOKbMT04-= z6CJ3cBt4zWXVbew-xiV`$g%)a)C>>!v)JAN@Xprfm7(=)^O|4==y?X|RL~;t&%v zz~e)^7DSn0gFB*?4FT042o$Xv*Nh*Xm z@4%n|0XNhJdM3g%qg@OIFkpC+677;)RphYW4$jJ8k-#t90BLH4sW3>v4XoGlRE5BI zn;AY~{f~J~qM1csEX3r8Lcwlb=KtH+*9i$(Fcl37v}h}}q+gh*|8So{J@GKjO;fag zku?bar=$FS`5ePnPPNITgJkr9&$OHS%=tpUaixt~*(|PQzy4P|)@Gh2Y0?ljjjI3k zW@^y3_Yegzn*6{r^_DG3a1*hFn;lOz3&J;H{)5bg$pum+jRlkKe4Z+_u!G>F1fb#m zHNL=?O(NZSuG>?46KcfMe~p+fz#U$yXv~P6%W_#>88w&T9(xJ=P#J*?N)Y{(xZ!?- zY56e$gb*zjOPnA@S92(uK(K!L1qHG!>D&rd8DTY3z~e(@!zkNBufl^ zzTqD#kUqRPAoh-lL~Q*(sPA*7HOIMvQRKJ2`Ve1gh4)Sj8Ar^CN9;jMpC}h_5VG~n z_cI5&71baC#bxqT^oTDovsXT0JzeC_k4QVud6jhwfZfl5m*$XyxN*zw3!g8SHEi?z z=s7wG7z3Gr<^kqe)$^l6%HUt>!%={@r!yjp1pX@ewdwFZ&kA8a+z^5HJDd7WlNS@&tsXEDJ69Us1DldyY! 
z9(bSU!&_K|itwDaJY}!o28d~N^)7e)-Z;{<_uFq($s-{bY=LSOY)B6}RdIM2+go4S zP$<^wuY+Mrs4wO0oU_p*Hw=^W%CI$48k5^4jRxC752f z&9|qH)9&u7@*SG4b&z~Ha1z!N_gzQ6?mT19FV`xI=BnvjW!JVldiWGiDVI(+k}u^o z`05bInd7N^b$7+#!Wob8esxiar&@Fh$D~O(O!lc)!vAeWHeH2kj5NG!9s3KHQ`V{3 z>iOpQUMsJw`?rNm1~EnKk?9C|VndBmCo&q-0$+cUaFacY7JVh;^Tz;@!&k$!hdb)U zO;4L;8n(otBc;-U7C28~+nbk&BXM1Nt1V5tG8d`Jw->$ST70cmv*QI{@CL82UAlMvZe`ASK2TjvpobuNYCZ|| z(gz6Bk#Np_Q#|^o_(*O9^O6>RprDifnYbx@D-)Q;Aze#Ql2bUkSyzmi0cBp2E*P}$ z#Iz>cD8&aeJmmHX!?=kLXLikohPhPmJP6_cnP>p-4v#u*mYXOu{}pdY9lk+dvqiQ) z{Os6lhP<<+-vb5H7i3RK{~uLn3ts!)V!+c6!>NuEADvlEx)XY|*8Z=0$nFZ?vbP{^ z@`lvc33Op{WkdfU)2+%zA{Uw2%FbwyN?H!WU;`R75l*eBf0@rgOsIqg=UB9M0E3$A z=V78hlEg-r(tmZGMH zYz$iV?jmfQ3(Z}HyKcjwqW}vs;OgMDZ=+Tyw$!$SkhaGdYc%?;q=Ir&J>&(aGp(%D zkj3h)$1;$jS3-2T=rX^|iPq4HC!jYmFvMEVBHTD@g;$6FDStt2>87NXsW^Thie*;UXzpUDDP|`h= zRNB~D*AcO548cqD*rR|Z!lswEKLiUt76rohRJ+HKYKBeHK7+l{Cy0OXxFYEVsv_Xv;jkP?~8W02_W zV#%bP0v{#V+m674|SS8fsgwxQGdc5Relgy8bIMwhCZ} zU3r;=6sB5!J*7332QK&;l_ioBX8;F^&KPL}{bwp?%X)}2U$s}#DgV$o z#+RY4=Jo($X6C=s@~ zV*Jhz!|)wI1-HT9n%l@4KP6p-CvILx_kDfmlpczPq$-Na?3)qjB+(Tjd4j{?CkRa$%T^9? zKHg33e2|&p3i5X_%44h@jdfUu5Vz){B{{2bXn@Z+^J*+om^t;O+Qf#n=lrO2j)l!n z=PQy1cq`iXx8d0zQJHS0qe(-}pO8xa#}f_ZDoK%(Oi>@Ei8f6w{|0Vb?l)V00RgBB z0>!E}jt->!q%_fi?xRDVra6joU?3;-70SWSxJNOe316WTT(?|LuIChlKVcWE9zJM< zci7LJBN#-ZumE0S51+a{ypxP>wc+qR%;NsO$oq`!bl#opcmc z-)zPk^tG9j4$NamTf2-#)OOtnUA+_n)7fJ9YO|>n=qZ4`ksrSFAU{wU$0cs6M zQKr>eh7&^d<~dh+f#)(U=aD>VU{lVAeZ>6lTEP~Yg7xd*_@8sdac=w7AHA>P zg$}R>%C$0Llg!J!H8J8Y!cCtx()I8qdQ&8p7xVN<2e$x1y>jQfHvN(YpvmX@HP9bT z7E{4unENiE1aeo0H>vDXQmqwqs(}Zla8&#V@lY`*-&F1QPA8`&x)=5N$@+^G*gltM z$t9dqQ+}7BL*Il~s-T3Y@$a*8+0Nn}&)!*pXJs!y5k*(qiBS2Ow%mgh63gHJppW`s zhM@}eOY7kc%}5`~K~6wI-?gBV?^HVBPAath{XoMG(X;P|z58KRd8NnmV ze33hKKw|xb5-K7SIEfhuL@fhHu;xge%fjHPMD0^!F1ER~`B_h5_cL_;9l$^xvPW8C$8}eh-Z8>%25j>z6Ix zDLu=$-`+C^fe!GTFjJ-h6wvx~cvJdqLM{!$b&oVjE;+dIMGI+;VKdC*(qp^AsC!uC zFF=RYh`RbU|MhLOtw--5eUxHw8E<5oZkuI#%E9RZnzbF4?Yb3h7WhYmUjI$&W{;fP zdbcELj8UoY^u5ExBS`6OR*BqQ>h?ImvOpM3W@>^D3G6Pg)u^RxFi}nxc~)pLpDPqN z8$Y+?E`*z-+8Jn$M_Uf&&iX^Cywd-*^W&=BY)5*-p8oOD5!=H#N%#B7@{7Liw`7G< z^f<@NW8?unRH+PsIqO>$cxAhCz`5xf&*I_2%$9Ve!#Vhj8tm?>c-(`+f&wa$a210r zn$CY9d;lGQH=FsA1q0q#h>*6R7vm^Fn8Dpp)FqsEK;01sZY+R89 zK3>hYYDVyF)C}Gm%TyQ)+IMSeRh>+$? zyWw^MN~VpU#4#3G5$w|55RB~Pm>BH%pZzk&Z|4!}KiD)i4OWj#%Wk8V`_~I$dWbJ2 z!L{@gcuAvliB;g}C9c|F5fxuMi$FFBY$lZ3y7+?l-mpNFPXjdV*6$DAn3XuE*Pvnv z(h2vR>j}P5LQK%Ji=OKLEJ-`ifZ zC!ua&Nc^NfrlYlgVu7-uF8<%XPL%Y>*s*)?!+LA`;WAJr4alqdfUtC$3G(iLW(ODG zU3$-HX#jnUpHr}OVH%VWl1cB$hSgTZ;~q%o0Uo1DQim=4X4QqRF+5`mDulT*llGtU z{|1-`L+PU~YCAs>5`HO`>}$CWU2gzJK!d)8B|h)mgt5wTO|Q1 z1QnP%u-<>LCSKi-+O}(-pmMZY;h8MaEFHDu)~1eBf+x?#1!|#uQ(KsF9Xq@e89%2r4< zsP%IC!+fu(aMP65HOmfKCQfQiJ2+L0VjCq(-!rRpf)lbI?7*4jgC-2i`>xct@D3`7 zG%@|C&j*3PHu;z)J=58D4pDt|#vZ#K)cmoKLuvCABfCyr%>fP7p~%fcRA_IK9Oy;4 zkB~lqHK|@|APXig%9PAur6y0My{S>Cs#>>1jy60up#B;Zf!vrsGm^MAP`mXD*y(S6 zAI9UGk>ulC7s5IZ`lb1}(kondnN5-c`AUSTD{7(EI=?{sV%%%OB|=k%0Cg6*U5xU! z$(*Iwiz01I2kr~=OuD07 zc@^eX9YR*%u!wsDb(a((r?h~MFSuF0Zrgmj(_F1xnE&M$&|hjLxh7$OZ_jMpn2u3M z1Ee-I)3ULHUlK-N7``qzvTP{R*v0rLNjy;SM2;!Ug2LsAiBfHP<YEa{_Z9-F-W**k_M{`)c&F9P>+e4LXw%;!pj4ZDMJ z6S5zs?;K&|(T$dyM{wK$r2q*ba&X9q|2)d6OVXNQuvj8;s?TnIv08u42P}~tvzf0iHXCqG0@cKf<7`t);p9RsxzRBUkm$)Xv~5F|1%4S3E_3M`*!<=V*#%sGzz9 z?W`@m^!&(=)*qFuqcdnd%boX=xD8t3fPxG{#xR>Bo8;JcGf_Yj3&zJSu*2rwJe2fe z(a(LIFCFrWlhpotlHp6ELYvd&5$fuV(L);<_8B(H}UOqM|9VX)9^6_OWlv&L{8~l0`x`TmRFe?(Sgx2TO^JQJZa3+sGqT9Aolravi7dH zDU>IBm8bga8AGkozGSIodt_AfzW@ z_54IBqxb<+O*K>e(q}d%hb$Z4&e^uH-$-rCrelW(*5&aqierUT9;H4k^b7J??lvT! 
zIx@IqmPC1?swa}QTY;m z-aqK0M@a#WUsv&!1I^LkD30;Zl9ju6|5Wf4%z+2W!g#vP9@MFX?743OFvTUiyNcI#;YDW~2R4Qro zjd1&z!|j7Y@ali=SN!U1WNfVwh!NxBaz)C6s*>9KrZrW7g?;!{jq+fgt}M7Q1^?RFo8FRZAKe5R-Dxb6Aq6L zl>RcX47zvQ|y zw5U|Bzl6wo>!`>bA){}j;}Q93`K<{<9eTJ=Q(En3N*wT9M|5?n%|omjx9StbiaR8G z7~=TYETWg;KO18~sJZ&9``l>8>s_KPayWeJ;9+IPI=&Ct%#yci%eQ!cqwEiMLAZFm zcI(o3ER*Z2Qc+0QKIuCgb4$XxOHnuLA6Z^%PNfo_QJP6qF(wT{th{1T58zwf`kK|> zvtVNZu5EwID_1LyR(>RJ|7D82;lhQS8s%(4B>Piev2|>5$+hoy>4IO(euEYD&mr+1 zIHe$$yU+t}D~oCJ!CktC4PFkI@L?hM-fk#ABr7Syd!QMU9df;Fx(gsX!nD72S~CAJeIb z^5BfYK`1rT)MBR}-t|gP+6W+eS!kDqHkK$%`3)YYl(tqF!=%L!{*^_Lpl;O_yMA!4 zY)u;NIXv-nPAkDl&91$iOF9$QdS!OmnTt`z`FGHg*G_J~A(J7= z3&gaSzD12Mnz{p(bMtax-%Ot9{Mouk69uBF&f4a4biVl?U6hV1(Z6R{cK3>FnU6B* z1|$x{t|M;?Vj9*W5uMtQ3bg0j*=lVJx}oJ|KgN};GgenWJjc{Z{wub_$fScydE`Tp zK|ezO4>`Xc4D*tiwcUUqK2L=clS}Gv{DB7cUVC^bFg-rmH4{^0H+NDDY9Rjgju^k? zM9^U|(@U|?xL&teWzM%A`-|}dPt8Z!41B02UCjaq@oFMZWq?(H^y~kJxrqGi z;tfF?BN)zyx_akI=SE_1yHrnx@dpMs%|-n%qHrl8c z2@*OLS>P4P42A7?NQk}g=+rvR1mkb43Q*eC~H z_=b=({GHDfI`H-VAvbe(+2iA31U_#|Eo#r*KwLrDy~1ln0|WG={&@iE#xs&r2x~`7 z*hwgYHnIM;?@gBlbFFE~$Ss!@2WYSe>^VU8maJt5&-kdNpWDuY^wep41?MY)d(~#2 zsJ{X1pLGT%RB7dV(4^Z*ZS+Y0M;cwEC@HW-v< z-oVcR#*Q6WHPE-W`(6U%-{saVJxY_2=^3WCQ^VN`=N(FZ{$|Ry|NY96_rxNuINIXL zF)!l$bP_tL%AC=EXBBn( zGmvL>K;3qG%``hOE=4W%R*)lP=gh~*1DkMp8=|nhcOB4jnpUZ&`KT$EM{>hU zBy3{h(_ykGDcq|Bgmr2WS0$iVc|Wsj1-um_Ryg2p>d7UHHCglo!wD=fWJ7ekE)MYt zwy2gV$FFT$wAJ@JFIS3UoLiNJO?#9rcbpt08)qnUn~H)$OAAiUBU6)(j_Mvop6JX} zo?bFUqeU>Oh#)1J4K-~@HGDOW+QFl21<+TemZotm)LS426%$!i{Wq>srue*us+HL= zh`6KEO(H$^=z{bwo-1P;S+N{V-imGYtOn+QE#Ht$glZpK%a&vJ*Ve)Fcofl)JzP); zb+_cSY080?ur#{jE&9ZI{V1)95A*&VX(Z;PNQBzRncy%oO$%5EE|ZJ5>K|LehMSKP zSCc$SMmI`HqN9Cp@2yQ$NNMIcSQ9N}L#W436UZ5fkN*763rPt9fz9_T9i?@wK_QCi z_xt9}k2g-Y=Oerp*rBGi{7-|SHEAO|esffJy{{o@P|T>m@H)1~W~$n$x5;rydQp%))VG3YH*!U!Ji6164Gsae+TfHXK9M4<1WPpVtFNJCK zs^C-mM>yZWJSOCBJerHAj?m)?_>W6f&QQ*WUK7cEVyZ0fZAQ4r#cl$zO2dmzt2Dfe ztY05|mOT6y7Jp8+#(>XJ)1~|Hpy9B;TyiiLCoO)Xzn_t!?`!5Fc2ctU$>}n@)@@nT zF@um}*`3gb4U@}#Xj^Yb4JN+n&dx97m}y2KH(r(VNRFo@t1mr?T_GckTn6Uo(+Iiy z4-{M)X0Pvi%mBYQ(>+e30`DcRj7kgQ&6(PYh`QWW`()Z=F{o~T(g3E zz}(=oxK1Iq%3~+7131{ftbHv*JSAM_(JgK9UvBm9i=nObJxy$<+?bLC3wLnYJ7+5A z0539;r1&~k(~G*su4&{tKR}~mkky6b)#G-OLcwvyhDc3N3|HH*I8H6Jcy z2I$_&4BJ8Qgr!RXK(rHHJra-Qsl>DJW^c9dF7OnkITFI1O$;GdW$GvMv<5%R5R}ZT zpP8}^o}k?9efp}`fyP><25K%&Cj~YtVAwqG@L%rwybJyJw}dKnD~Dmf^Uje?R7|)g z+TMTr{gA3AH5a_#(nlnX1DZAdF#6662jySjl?F}eEml;1ZAHjGDfL{5K;_hc8yj6S z%jJ<-!+vShGX`k(i-))h>zmIFv&zknmcCQOeRFW-P19(c-PpEm+qRR9ZQHhOI~zOM z*yhG|vay|;?E5^o-umjRx_{nOy1J(|J=1eKbxzK2`qT zJnEZMF_{m`!^^Aax3c(QA5f_Lp+MTvGaA-iW!|%P$j)FN>wNby12`^m(aFm0;EBbw z7`Av>%tQL;K4bSO?m*?4UlJX5*b(}65<-f1@F9>?ckKmOj;=8C((v9`iL}}7Qf00( zy8f@ z$Fmw*3xnmz;Wi59p!FMd^S_}Dgk$XChCVa9(}+#j&e0F&NMpTd9uaqncqjviIhoKE z!6k>`&7YT`k4kl&qXEng-1}$NPoaSYmff41dMPJJ-SWO{Hd`6c3&p1at4lRXt2yLA zxh79py}9LY(%(wrTR?bPgi%dhu=Y^l;{TbnVr#QAZAt7x1uH!$=+3$)dXTF12G?}& z=sZgl)2b*hvwHn*@d6Kdtm5_Er{8~|G6pmUoFF!x!|?1@|3>D~5rC{!$9rA4hnF7= z_Ec#1SzPguz7TNu(l5;wgr@!b@gp7QoHC90^c9FO9sWeSEeT&-!`F;l3z<)hU0LQs$qKOwRmQSTH4^_uap@ z)$U%)^x~habL_N8*}X7=U9|4}$eRK4hwfn*BH{7<~VBqa4H(Jd(L( zL4Yykc_imzvh(y)3sx@Fh@YbYER(Yqo{-T&*5cximz-&=ZR50(*m4xK9gcHd2jy|9g=&cVJadI(Jjz_0o=`GWvW7iOAocl1MuI_d#gogSX2#TvoB z?fHUbxy{rFagrHATzTC_0I3s{F^JD6H>{*xOIF-~!rMD{JFYz~6Fop>Vk!p^=*F9D z(fB!_l3MV!@CM7*?RMJM+V;Z}2fytaxRKX{E6&qpw)Tstx@~9815eZ|h3iVVyLfrd z#wxnE@$5dnVSSt!4{BpH=i6u9P&#i438!zRW@oaz+Q2$U4r>So-1rXJR_0r;JE1D! 
zOn*Kj;3FLrxBn0XH?XynT8O?U^*-HmxxlSPjlu#%lALtNCg3#lRU|K;D)$1!C!QqQVOM`EPhD)GKfM>ZI#6 zAr&)=r2Fu$fKsU}$rL-(5Oqv+^2yh5l-@X4657#hq%OaI2^_n?^vaG#>r6-vi7M#$ z6vS;P)+Mr9&&^s5&0|QkBdptQi9aRXBlPkOU!oq^HMj?5vFq@WGlWG?MeK4(sy$*+ z4!n6onOB}=2?pTNwE?UiG+3=R{qyT0`$f7txOM5$dMvFa2uABDDC^u#6Tb6VJvej+ z2HCA0aE~`FaG>6+|E#bP!2n4>^?GeckxmZVMACJp(oOeEAm7YCqJuy$)!3G-ZL!Ao zHW+EbMVHFNiZyUbDFmeLn?6sS?J?iR{+%MrVG=N22_cEZxG^~$#xNC6!1JnV5;fq1 zZ)KRl1iE^uW%nl~tfdvNa@vlxTr*bwc{omUnFWrw#VPX$<->A#*7`R?mXGlbAUYXT zefkVO%MPj`(8Y5fBdto5b6h>!l?W}~qM+f<7y|woW6iCGAS_b(yGSJT(on@o=24b_ zeOB=y1-=4sf*@?maCt|mD(M^h5N!0DCh^ilw*DCfIPc8QevA^~XKb7rTv z`nk1~o_n=1I)-9r;?C&N@s2A(E8K9(G%dJRvsTNzRCTBWQe(jyERL1=%1vF2-wz-C zTJ;&EYq>Wq>>8}2%=U&inv3K;R@W07=8{^tLN%C9E(7WOEoG7tP0Y2R4QoRNv7RFd zd>Ngu8#-gzyS0Hy`bhDyH z{SwfDr(7Rij>)(Q?w2jm21%qufu<}o0)1@st-Iji}cY@uQ_a9S4xUSHabnL6%8TrUHz?#4ciy;e1D&IYM?wCPK_k2 ze`bKkSfzTEmaHHwu1mINoTtZ3(`gLW6oiw0_Ok978mx^87q#0qn@DtNI<+B)HGasFIqHt;0+P0>n)$ALH@MgrVl` z01_UTBAEhKecoq+yx+$KV8a(5$er6Yl4fnZf*NLV z&(<1aMqTuym-)lN68RRfF>IX~#Gptq}Q5J$X2f&>V)U{dCK zBEZ_G3s69ol@-rLG*U`1y965tM6W)Os6Z4g`E<7{pqj{}bo()Ma#NxAso}xB%@E>+ zg|thP+!=Z zi>pqsCeSGQI6;-?8hF+da9MK&_gnP1?$3cVuaLyO;o}8SP#D%mIK=IwT?SS$wK%dM zBUJHAMzn)9`y5U7Lqxi3Q*G_vvfK&k)x7yD0xD_AGlq^srJ(`SPAz_jzLK!j!)nfw zXpAWd`jS&QD-dfZox+h%CaY_i)90lm6Fpq@eOlWM?k%>-qQ0S)XKAAZBIKCxCKr6l zpAUyMsQr13FDEga;-YUwBJNLyyA7aW|83_%2Nl+&g+9_PIpfd&Tys&2r^4Pn1zFyj z6!@oJ0pWL#e+Jy>`F8mOrRWa`BKK$V0pae{xYjk1-)tDA1u3JcS!9K(rZo8{olOca zoEBP+wwrjXvR0zqM6{W{Ro@jTpY)83S`f|Dbnp*}g^hll#Wb!bH1U^|vJTwtN*%Kc z6wy!=?+rmxfsbHKw(e`0UpEJHXSny~8&}hV3aQBWf~LH=F>&M`JTYB1moCbfvF-aBg^Wsk7pT9)7897;PU|Jha)8Ly|tX<9)+d@T&?;{8#&z-9>jqXx20h82zzk9s%j{k9%* zgkNg-N3*h&fb_2t`;tPL^)R2r&4)z{HcQ|+Lb@=R6is8|irFe|q51XtB%^?OfCz|8 z+1+nAUM03LAm(Tt4aKC-vw4H_S!Iki!6&~gK-i}2sfr#}25G*5DYpA7pD zQa;HE>2yy}AXVtj#!l;GHXPj*GuzN*af`#HZaGU(bc_g>XaT_p9*bZSV0q1ySU7`R zm&fIO9&uX9VxmKg%NlSpxML<|zCewmFd-FPZiKyheh?x&8Z+Tu^)Rdk*fo)0{oecZ z;ueRFvsI|da*LJ)N|xW(=Zxir3!7d@fNCq)bn0N!rfBnr;lPGhAY8FMK@YQd-PLtf zKe7LM^g{1YJYVv*cTF>M7hUxC9}meVJS4dqEi5BM<5PP_e4=Pen=_=8z*w~;Hlw?C zW5uQ%=v_tM+ekc~I8D+?a9P#r6)ZDI!k_-co0=!^(UO;yhU93X2tmo6 z3s3XuxtiS4#$mk`?24-y!MrFBnNdoIvUzM>Rtiyf0mz=xd%EmxIU=96T-luVTNKIr zEN4;&l!#c72I|e)@og>h;&n{72SYVpI9Qu1oxsCdAqtCGttK(SDh&`YaY1oSoruhL z4PnBW`MO~PV#prGNLQxP`^v7C(mo< zS)em4?4CUQ+4I3HPi>d-W!#^H#Hm~kQm65XWgQE7o%97t?OYhB#wO$yNKTijT~*QS zC;~7FHZ1b&v!NI9MK%w@#kqSd!Q|c#DV=?hF@Q-g3u$VOwMhYcQGKE|$l-Bz{RP&A zXTWsY4Gc*PUPW_PY83KYa^_$?SBt33KPUMuHEqlS@@o_@8Ma@$`+{Zkek!Y748tZ+ z!w0%t)27!fmPPmjF8?M)bDtF_@~xsChVb!o!QoAuW_59Q(zu70yTHJ{?h(=wYaTZ! 
zVeS16mo`u!HXSMfTAigr0Y|mI_n{3Wc-kl7j3|UXJ#->EZ$S{>p$F?Ek2`ETPh-^?voPfJt?GxctUVf6T-(m~Q#sTet#dm~bb~`V zgGdge4b+is`TL5OkRREm1YDr!uRTg3*aeF_;mejLofi#&hR6%v%mJ%?bpTvzd}}mL zX1&2uMS_qB>Ue2=G5}+0Pn|c#HuZP=w;c@M^@$Eaqq4uH+w+vxXWPt}w?R!EwB>gN z%S?UVd#prsYU(>)+3T=;>48&rq>{}tzd>nTBTX!uaOp>Q9-smqPq=?8jo3Aqv`q*M zCQaVCJO{fBUHm0;;bLt2KE+vz7*|EuH86rB_t#ex}7-pf_=q_8lm36ySeK>A!cWkP_ zn(WA_JD(ypA0vE#GHd8BZ={_qJ6(`v@dnj8L0=@6p+CD&$@y9dg3xYPkXx~aK7RQT1%~ zjuDr=F&GO(vfe?&3!tdX#fdCAnY?|i$r$C1gcQFwZ%?}r=aS~j zmcPoqQy)jdwBUjG#Ki%J45-fdE??%oYM=8voTzN%DI+IfSN3$`87XSnF5ebpVvkd& z)aWkF7P7cpiH$sk9!%V(FDJts8fnf0HNrcu&xc-UbJ{)IU6ZfpJmrlxGX7D8!`q_L z=o0~Yk4F)(3*hJZSacn*$+Jw<7CJWW0qtn^DULR{_sF@;TX$~vNKeXngJQ{dWVn3= zNn1Z3vQN$}NTq}O);JbyJE$-38 zSFP@hP;67iv>e&1w-yfspQ1-%AzzRHXHmt88MW3NjFO=tgF4F&sACwx zdCeEuWFeO*NjgJ`A^dDIpH|_Wkji0OG_uac^JIw?@(FGr`vzVC=&5R`F`-VK>1nvt z-|~vHLN%g^F8Zl%qyYpbaXVAf!Pa06ax|hVL=sSuc;{>@k6dkAI3c(}p-JxK^P6K% zXTUIoRnsq{)a6X~dE`TzTp5L;uIVMW9_-JXA3}0XKjXJ3gd}xtm4{K*E0*J}0K`SJ z)+$h@Jp2hE*OyX~<2(|7*Q7_;7@UviAe&ULT9>7aF&PaxlwL;NSpq+n^saFQ(r5?c zf{Kos`z4o$P>b{i8=K$>h|rKRNmG}-BEaoVF#)*aW>9y8vQ>&Moo96ZxCJ_`ON7$3 zy(Vd^7T@}O0}N<$OAco%8oxa!;hMPaOyg-BtF%YDGbC%59NB13#%HTqJ@*RPl(R)~ ze<(-JG#0=lgjubF($Zc-6A3v-f;GzL;&@AOLn1Hx#1oWnW9ZyX=%5!Y8s8qE@!m7f zTh07Em5N!FQFru|U2_h}tczS&KNu)Ax^yMBmyBu#IV;Ces%Oi`j?1k?7E!zC%kG$B2z4mvlFz;lnkAyr0#Mz>=kTcQaCC%aY2h)#>)rHBPFwS^-HI7;iSy0`Nu~3l!Q8ih|6IjXZ)-SQd=4(!z8oPf=?b z4JGN_q$)zHry>EOHfe`fqM2p9HvdX`(_K}(ucM^GQ2@RmC^J`RmaVx9|$O9w|A-fQLC_Zb1{ zI#`mB&wBWgYR+hI;fGwfFA__S8Q|=(#ea#6dPtbUFE?+*C27qu2ZwqKmRKbhXq~pn zEBEWy_(MJ`r7?xYc1jM}14J&fU_;YtkLbx~-GDpF-g7RJZF2|)-Lzeopx*~^KsvU5 zD>Uhih*NKc$G-}J7Ka=gr$#owycayJ{Ik%H+%uR`{bWQC$U^{z{n_bWC9)xk&JmFr z`%Gk*EaH|_Ky9!VSj&ue}IW8rjy?glv7Z?8PEpDdtK zW4cQFtR<~R-+>wxxs@LzXvGfS-2j8I5zDj!Iy)!>i=*7X?8+d(&Vd5}5Fduy4q<#0y#%&Fw~jb9$G%4;fI+XwRa242gi~kA>=e; z3fTNamiVsz1eLJ0t}@Y8+Mn75IlgEJ1Ue|sca*9{qlazy#R|rY98iX03U__*KFj7d z!<9)31+1n4%AnbHfY}`-J6;Bw+9RULFn=F*h?ja*TQtYd%h8JE!myf;?Q~u)X`$}8 zuLFmOtf%t5ZK$uIsS}qXSyneg3#d6SsEu6Y(OODBX=0zSG@>OjZHk7dTx=1%-4Qhy}XxK=>ee zR!-eL6fHMzD^K3X3}FXG$9Ut`NN^2L6CQ+2+?PhteH8*dAf@M67-4UKXhU=?)(Snk zM=J@*VAn(49P2Dr9V3enU)@7+=!Z-KSml^F#^<;#P*r}u^nDZxk?U;v&{(lr=igTD z2i+MphFd}@dc%6>KHA|jbII4?V5M5b!hJ5oqF4r(KQ$2<70N5%t-s7TepXrLW&$}0 zD1~;RA^GE_fLf8oF3Een ze@r4cp9AWZCkLK0D{Fbf9px-ys8p{5m&ujkAeI?4UUNsx=Q;0X14RHW2B}2$vF;n5 zq;`evjPHDkotKi)u2L9kz2@sGa%a3+jB_Ya5d*pI8Nl$^F?hV%0lg%*D6Xr)xL|K* zier-6E?rU_@9dEqRDS~4s!klgy(hU==dZ!lZo!uWgK#FIg05Ip=^q-Ki)4SSA)RVx zDHxX9VzS(849^<+&aH>*Eaz6F_pKA^o4N+Dg{3jk+C$VXgWK#dyq=l81OW=cbl}~K zD-fpTHoUP78gah+AZ2O$M0j@L5n?Z4k}r^*Y5ndx5_B)xQ#TqYuE7RJlY{n4+PoAT zT<*#t#eE+hNjh(0(SUDyZW}aZYbWjT&$6lp}#4Qk5F?x*GG@;7E?i zy+lH*`prY=-q8Itg=8wBYB)!EX^?|)G<+p=|KMyYhVrC#t5PdEs9FC60s`>V8K|(B zCkJ}k3YYZ#Qg8~%phu`Uo6(!-vAMURU1VMw_5K>D6L^P4(~*r5)J=y{BVX`X=tx|T z9|EU{^1I%^m96xTAs2)|!siX@*>po83~{J>LO2B z;#7lm9(JXB8V))}H!^CnN+38$<8uA5_?^%(X!>gkcDB5BgOMcqssnJQ*c)C@)RVndZF2_|CC**ZD=%7jV1a)}Z7f00 z+q^rUM*BMZeZ9f`hxPV07|00U<&q1YxW<%q+k6NVVrsQ6brZF^f46md$N^GX92<7*A~QcrXpmSmD-CnPh^I3gb59dh68nVrVi+_9C*@Q2@FbAK zp2{}JUDvQA_AEt2WmWR&yyPHLK8Yz}`}Y2Cw-0mm8*oNGU?7ma6~_Z_QKiqPxq4NU z8!|CDo4AYP<~|0~_Wc#RRcfn=S>3@rVi?m^i^Jb?^7E3O-V63Z2z13_6abpW4aMC9vR}`4CKK_%NYIdS+ zz;jOn{$~R_W8C|Reo-_=%0pT4Ui)Bhgwv0OUgzkj05S#ATpvrn5w*~in9{VD@@d_| z4GQaGSg}?~Pw5YRfHgRAlj`XZZk-m=isiGJlss+R0X(ZT6!tJ=7>KF^;0Bt*;(3)z zK05cuB{hY#Hc#Bum#yG3`w4M=;5bBm-3j|e7~w0zJx zT9EX}aS@An=52o#rn^6gPco>0RIad*e)%#d>&+axHmbKjsw)o(fYx;q`Kd6$A69cu0kDDeL$(J_%Ltw0SVFA! 
zH#=#A_taxzegqTpgqbhKy2qu?%)OqS?MJMfYOi+*e*u?=j&z zqf2zm2LM@N$?w5IGq;XZGCIk~GqD`blVG>pb!bdueMXi}wy-|`gW%{S z3Tq%{NgA=JNp0SUK#t=!2kGZ zx&Fd0$PU?Bka4biT)lX-a&Y)wHdg&{GB!bL&H_htMTjIkNqR`NeSbA+jmdQ8%Ir6= zlf6=({+5@hwTq7yN;OG*cfh?arspSR87H~u1@hJ!0;;JM+h02Yh1a&|AWzPkI~J;Y z*VQNAwQRHjDFP?x;Q4y=-kSHm{-z*2<^{dQ+=^jt1A`w6ow7$=kLWTL2UWms6TA$)%nZ_*Fmpb0=;uEz0X3YsGuGmY zkF>Ap5FH|U{1gK+Kl?jFAx=FbY6EA7mH>V9l8C#QdGaWTN->}FD3F`8uuw&BgSo&y zGmO3;@w{AQ`;4NIv_ygV<9M%KRU`E@bWaB!dhj*RmLAoj74Sx#ysC8$!-z%&Kv1kp z-+N@K-AtQ~ShUdL-KVnJQ{EmZ;d+V)r`~`Kw&i2;iS;@2d+o>(i4b?=(d81eaWX(9 zg=kSb6ZM8)uEmZruG&*RrlP@0y?J%-2+*bbJpGPF_47*xO1c^rCDa72f_*Sq0* zZ&J@;&<#b=QIT|g>?PMO7>ySpwHC)NFoq^_<6&A$3?wz%vkqFqHCKF&o9~p~CuD&w zzN7$n%!W<pQXMGh9vP6uUL!UoiYK%jW3Q*&QDDp?l$frw)>AYGPOFgPeyQ{E?IxKwaUk0{` zr2iA2`a-B+RIWCPw*waTF$#|dy`UEqf2ntx!Db1fS%I#$^_WDs+T*VN*&(zXh7o5% zM}9%Q)*;a{SmCZCJ+5$tXTYu&^ioKDZc#S^McQIi@zHSYB8NgVDA^qy?E?ZuGu#-| z&l3UgN4llT=;1G;b4bnyZ522y1|1Bb;|Vk@J~oX|wwi6UpWmiG7LPDb^e7SXjK(*$ zgUO{$PAGNWt;Op})~nC@=sda)N#D&s)US5b90|hjhe=b|OF~g#Fw~C&U+$4YW7z{} zFX84|_x-~5PIJCZx5J<)bX_;A+Gg6K$)Ej+2rX*E6CfO7?N`eP> z)0-j);2aVWHT*V7Bi&Gy;0(1!8;Wn@T%ewOpFwtZXEKnx8?oncg$9sf#y;Ayb}~o)fhV>nfB)l(SLOQy?m0F5r??aOhhlEQ8>it+%HIbi zJe?T%Z9KnvBt9 zgz>tg(G$D3_xx`;m=605_z!Kh-m>)D_F>wt=!_m%z`l)_ZkYWF*?0Ctx$IF8iIwD~ zT`X+ea;PX_u3v{^ef$O_nW3K{++r!5VT9fA)3tFGadCVu8k7YkM`>S=x9Ba^M29Uu$i6mKtS@ zF>vIONRFcF8bH`2pC}jeD+ci&V^~Sv(vh6gA45yDnqH*htLu7qR_BNx!t|t0sblFv z)#F?9nn8#-^~6;8T)JkUs#WB1Z1}vt9oez2UVRF65H5Hn30LWsHXkriirZ}I42v^? zPcDg-n6Ztf`*68nPV)UP)j5G2;S+fo771Y;sk^)1-9qJgVorC`6%4jyU_57`R04f! z^J_C8f$7-vahu2|@whiPQ{_j8iiYiOfyAmblzHugPafSlb#Ovu!)k4zra%@=YHv~3 zIJkvCj_SoD1}5(&Lnxw&D}gtfuIT%P)0*Z;Z${dGQo}0S`O;d5@T_N<){f2GmrxMIC&1K$`xMixDY83u5p0Z%!(f+8QZIT>W7v}5&wrD z3jpyKrwf7=qI24xMjO7C5Wz%alaedg0RL&C4%lBX%X0D{=e6FI6v480IzrZI3U2Yu zgK^G)=ICdQygHES3TlP4f3#$@I&k-Uk!4>Pcb>Qn6m7B{zFncX6=NwGj#3VoqEau6 zH|DGZYC1pVBnA<4(->*>`cuYQB)08MDl^n_6Rxq$kS`<<3lQYD_G`R!YqVzBc(Z1&*i;NHXM%RXXO?!=pO36&X z7+q|K3PHq26YR{va43#Z0C_P4@2hi1^~!9Ys(fEcD#v;y2~bmBIm-hi(N%B47uCFe z#Gg}5zX}tWW>iR!0gmKTWmKb;hF_YP13=j|w)%ssT0^u9F!7$?nzH{wL^1#^s4DHY z9l}r5mAhFra3%hLT{>3deN~GE4oZA>yRRH@zekBmXjV5nZp^l|J!cXhyZk6IA;qjB zlFJdmVqQ;VZ?rgHw8wGwn*tHvdQLcu0~Q`A84DbYIeoE9UtM{@hH&RoO$$tB!_;$3 z(d))g4W1Op36mTt$C9N*^kV{v3iGmWm;)W=h7N*Ot%AmOB^lmrn^~V=Q8^e^RBkocO=>q9Ou0WEl3Sg|HZc9PJ_6 zKQgW8An-Dx8JC%VhwcYPcxH3i?OphT$yE!~>~zBu8@z3fY?ZqvaEzTNCy+F3^dYlR zbl!Ymqsi-~C#J(PSD9lvam8KI`#fF`WgrpZ9oB@wrQ^;j`z%yfJI%51v(=RagxWz6&eC$W`@t7M_7H?@q%bjErQA*SSiZ) z!$`!G*+p21cS{)=cj&0jng3qiv#v@WBd8Y=WrIP3ry9Z9f)L|nnjIZLJ-xMtfi@%G zZN834tl5HcWTqFoK*SxV&Cu6(neIMqiMkqNo2tE(bK12VYL5z?HFgVt3<-O_#Up53V^Jb{FS1*}RB*8>U zvt!V+45sytb`ZRypC8}ceSEP9tUPGd&MXPkf^#B!=GchzWh&@i@LEXepEiqQ5;$20 zRg?K*a8p&+2tSqyUv=Z{2pARMqvE&d-p=pKQoe(8jD1IE-I*lBsR$U^poDJrD#T62l*JQ6h$>VY|``i~}X&#he- z>JKJB<<)KEN-nf}RQJkk)9rjITD%%40B#F4kQB$+vzFE0;qnrk@QmC(smK;Vu{O;I z%fEyYOg)HBbZGkc+cm_8mb$N?t_SVBh<;{?6uiwm4NV@ zBdR6w^=u-%UqNP+WoSTTxQB`9G0ug;Mw)_RJ386Yu@Ms6lr*YfM>c0}OychL#S{~U z_&TaW@kp$WUQ0yZ&463{&()Ebb^vHq%ckIz=<}r}_KcdM`OE4g#m}m8{upRlD)-q4 zjt?Uy>WV*eh_P~kSuQ>)Q6g_K-;g}Lf%i2L8`uQ)Nrhn+xzpjV-UF!QU*0$M8OsZl z?M*bUP(KO7l4G0De&kykqx{*1Cop#!$8YX7CJXJ!aDR&GWW7^42E``Nq85A;Xyqjg zF&c)kRvjDA8jutoM^cWgqGOch4++!(|Ks^tv%RN#tX}I zZMP|o8Kn~>R8Gp)?YOk(QPj%i(X*FuL1uUN_$;$eYZumqmZOoqeOz4`>ybJ`?MK&A zx)5?yERSbA>_z^81t$%|Bc`Zzc9~gcG=#u*_!37u0PBnuJvC)@PF!5=U~NA^U%5ug z3+VP|;pDgk8Z{y@T;YDKTNvnwYTg?B8H1#73W)_$ANmQun>&m-qK2 z?=``Ct!eei@jMUV6r3K+?+C}gd>K8^fY*gl9Z*_G1s;Dj&LJ;PUttn~W6mPBsl-Rc zjsY>c?o4d|0sjGVvVL@|C1S)E7*fn^=@zX~wwR%G<>BJvtAmmO+N{KV!xquM6^L3H 
z?@S0M-Z9!yeXsRE(p{qCbmWw-XFC)lO*hdsDlE}YvM>E}udfW3_qQ*aC!!7BnE`e7 zpA)t1hB}PMH3-|J9}yf1`p6e@{2Gh=_uge*g1?{ETneR!ZY3R25^Qk`FtY`H^jb_|1k@sr6)&|}CIp3y`@}Hg%V7nJ zICM$2jiW;mMs8ePYv#w3{wo78B|i!PnE}7GiKP)99j&6Hk(CM_Gu@|=rJjic9wXCVYZ*N&BU&Xh zDOU=G(9~Y zGc(Iy{@3_xenvL-|LA?n*#4ovWdG`Y*?-Y2pYkvM7qEZ&|Cj#afBE@};qQ7`*#E}< zm;TiIivKH~fAJUp-!=W;*gti?=&!x`SN1Rd(*KI_?;8Kv$G`FYLx1^Z`rJ>3uX#Q( zGZX#aynoq$<^SLLzV!Ycw*PkgAI9hEzO=qp|36R6-`PK9e`oph!1$G=Pp^Lszl7{x zsr)qjqQAoW*TKK^7ysWG|HWUSe$jtzSiY9d@*kbg)cwo*!c1R!U%W5ffAqiXn3zAe z`=8_Wmrws6f2^Opf8i_Oe^c~7@!y4h9gEM@{WpBg`QPwA?f!fG71w`{|C|0gE`Q_v zO3nWQUq|YHA_9*Z+z?=l@^%Uwif+(E6t$u>9XDCS_!8;%NF=bXe%}Mg}&9M*oOEs}L(21K!sU56?fP(iQM$n}O}V zRWmEvsrYf}z%PR8ZMCs{Xos#=S^Hf~mLBcVu% z$AMC(QBnhEXPQT;N9e79@x?6G#h>5)3--OB#Dqdm3r#4?izw>ID$R=^XXRU;2f;Hp zhtf3$qqDQK>xl&vI>qM7qMD%A1V}8GI8kC^YWR?T5(UxSd&U2L@0JHmI-D7097T#lr-@D zSyWq=RZouGJ-+e%Qu^@_h_0!bvG$de$>iCFG%yn|7{CNv(O}GmSrOJiF~mo$0RUW) z^oOL$;RR#&EV}Nwq3Pju7BH%ZZ&6llRK*A{jOW}$C*mXY&X(Z^wcg9b3z%Y!i?bbz zbDg6tpy%fuxyT>QIW#E2G#^XOO|QnlrxTsT(x*{cA0q>-$=keio0_PClAI>K5oJTH zGdQ}25OlSU&NcM6ub&+o54H@SXjwJ0sL#efR)IBq6Utii62hX&>Tg)bS?_t5pDx%w zkF}?NJ-5W?yt$C>7W-Jw|7gfAs&6R@DGT#2il_zc9-lt&IQkG)ZDgc( zs;g~ae8ZXXu>t_VM#P-^*eX*~!}zi4C5zyDwrN}JO!86C_>mUo(>igfQ~QP~wC&-p zrNo8Z>Gj0Q{}%H0Q2rP*#%tc&dyfNP!$AV00)Wj(y856*&j8r6q~`gdBlQY-#Jlz) z`Vf*Akx&HA_rf`8Q{G%O2nVMDGF~U1nb*gV_a@|4j9&w5Lz1Go@ISJtn zCMhj0;>zQ?YtH2KWbd{k@x2kG)8J$3r7O9ztSqFAe%uV|wvL~l_YC@eEZB(S%D0JS zHAtD~$4+SFsB4He!o=L{!4xKA+P5*O6p@n3Otn?ix}lyQHFFtyRfBhRmyjp?r?eR9 z)WBy;UiPihjQAXG`Fh9rJ5?^Y%CRYa?Sj%6ID9)?HiLf#)&!h|+&su{vH?Kesy#uC z2dJnb_&8?Nvek#r%pzI zn#FbhWuQSpT!8dJs3!}1uOz}DuvLz*X!@`GoXmEvDu54y z*xP1k@15ntk0bmEbF1ZBqDfTB&D zBiCWrq)#9nEY}dAyFk5;GttzV*2-B`a+zUsKsWyp^MVLlmO)4=wsHl2!8Ul??J>7y z3hoYDubnJXcHS)#Ap5@f$ZF=z%N7CAL~wKa++VF|9An~u8cjL?o#DAgdL;@A0J-qy zg_I9hk@)yB$T0;k!Iy(qQDnVEw^^;Ldkx7OPc;@TMG;#3HqVxIgipM>13#aQl)gH?OQ3Gn99uAZQyOJw-sBIkQ~9Q9hJs!qG)4WfuM>Et)~gkE9XIp_?oI?2V- zBhH>yHapf-lJP4=PdTOqE_AM4ct!_k-JlTi%8>Xz|pMH1x_X=%nE8?5NY%TPNc>~1Q zCp@oHnTSkyZHp54^2Y;iOha00_#oT`^6hofX}CMHM*ZUR+QVLdu^+_WvA@Gj8mpgqaUCA% z=xmB?F;QmmC66ADy5f&CARgsbal(bVRUGqY_o@O;=TFe{`A&;`_Uv6tsd0Xs;Txp+ z4d2SZ9W|f)Wy<1MWULBk6#%uau_F66#uw#>Y*>X!(^@$S>)aT7iX0mb+M+-Fxc%kc z0A(e?iFgS88Xcv$SgFR+OHyz;Zz~>pXo#=0ob&$ALYKw~PW-8&giuHAOado~tXMu( zGb`n_$AHeBQ&1l8qCW}>;mupRnjzneJ};t&$BBBG1nI2A!lBz5#}0px+&>!geFlgC z0FT?LLEUQOjqQ~vEQ<4dsHaFZo&?02?t=i%Y<4Hdp7oze+m5v3a~su4q#l!!uvAR* zD~vpd7a;8qt}pf(=ue%*icvKS#k zcAhOO?;%jAq?PzbHMXWHD(oImEDnmw>~I=)EO`P!nEs-GpNGBDp&YhscCTVg*lIcE zX;ef`iy#U!GQaX;Uzsjp2D<^;=CL>fG|6U)Wfc!Ie5k}rh%|*O+TydOGM)y)m^5M# zP3E^2CaT!hT~2pP(2Mvz#T9~tX@{CzdRYck@|9rH01~bYty6*1CU}55GL&z)HK1;$ zf6DxAOw`;4BuSe@pn>_8y78xW9BnNg>m(q24oW2cq$)0xLQhzSogWcyy7mT@N%B>h zxYlhcW?Od><@)|HMkdq7jK3x^3?}PZ8>+tmIcJ0bV%nzOx}7ziAbV*A^C>ufsv1Ux zPUI@^fRF4))=5sf13!weVL+`566MOy=;-Bmf>mv|TAr2IbBljV4d)$>BI~t1#oxol zWgTmO1_k}OI+hob?RtTi?|M2A4nF(*IBz8tUtz1bw&GbPL-#5OLs_&#xLdl{HXEy3 z0?KGWGB|K+!C^T-QSVPm<7hd)!^Odw>#5=*1B|MCCDhz+7KU&| zNyxHoHadJmi_c!FQVDhkR9gUUeB?XvFD)EteEW0jI~jQS54LbSU%dF@I)|W>HI5Oi zsVOW(7uOuaRWjXZhil6e56!;nv+p4F>Xwa^V4irTjWB(AiFzMU{TaEMPPM${(Hche z?h`@jZg)(}Rmya2w?xUEf(^Ev%Hf6aSAk&p5zHJ7w>A4S)oIDA4=E4W6-@J-_FS=} zocV;j_-;h&{Uxi2$p}0v{bk3ekZ_(fIE3Fs@mWQyB=dsimWr;SEwO zRl7npd;r}2Qftc^J=L*<{~HoZ}H z*mw<2c@`PFN0f|~K0r7WL$(s4^H zLB1sDJmRp>;04Xb412`m$jZYPl~n_F=fyQ>=^b%!n3cVtE*gyB!hjJ94vpOqLQ2|`9RX2L)t@q;`NOBg5FRCI>f{JoFv%t->s9@Nv zEc#w%xNeg(ONL*YF<#gZD$C2eh@X|+8a60&k}vfm#W}9TU^hHxKeTg80(c-73xGq1@eD_TV|=IZTy3wAL@c6npU}6I#O;ML)tyS{rjRiSJgO z9a*tE!6@2RYbq>->=mQn`xSYG&57Bcp*iQJH7oATO#M 
zcwhGaYuGN{vE$;A`AT~#MzThu0WAJgCmuZSxuh@|$u@WF=Me30fT zJ0EmwBC@zwnM(ymsTOF-fPjHl2Zb;uojhrr7Y*iwo;Y8?Kdc@R)AvREvu50GHL&?o zQ#!X=0OJbR&YwFqo#H%xVXe{RIL7#7yP!iD-aSg8gPO?&+b_5Vf!9R>rG2cw_b#YF z`C{z24W~LLTv_D0&MOp1eri_362Jd8$z^b*w_BKL9jfhmMqGZ31~HRz(Q`OaMgcKo zl&TaLYGwbv>U&<64SfO^i6;mNw9Q6FH2B`b!DtIHFyNGg&Y+O|vA@ouu0yVtskRlp z9*!r_q7k`0wv^6e;#3*&v+&~~OUGBp6rnTx;Z5twIot<6iKmN!KXi$OD%Y(BC{b;z z`4&cBMOmwE_rtlSah0t$hdp1D?^Aabrqbs;_M-f@;LR9TIEJzWhToYv?y!wJ%;!O< zvv$7Gp*Y+mcqop6q6LC@82>_sW0H_`?I%Mb_-kRSG#qmoV{iq|!jq8%V)@_u6`MJ4E5O49Z<@;dmD%|aA~?0DL!as+`hC!StF4PLXX za|U6s={1zLH8zb`CJ(0Ro{n3oI%}TPRJugDKkAjp)|SB-sh>>l?lUI|E{$RFVMq27 z+&TYV;~TdQF?)FC#y6M{CHbI*!}(nJ`v|=t>Gu>)ZdN2`(SDUL7^-iiLD3i+i3!XA zHdO4ji}!`VNPr1(Q*xNXBBZd+PwQVn(F!3l9uZYe`?_LoAkXO=YR4$~)06BryQ1;L zFr*$h)`N{$0p`{Zjw11vb7ithufuZP%nH}$bG17Z8~;x5>}CnR&@F>?mdHIAjz?P* zo~+E0e?ZzbRePK=AVl%AVHotC8r6Snu(r3g7z=;L92&jfl9hiXDgS4raHCr?!`!u^ zmeD%kkLkF+RtZd`X1ItS9!-Do&Ki$+yc??nS3<{r zJo|GQ3%B#3&ON3`$aX90Z~ZOD=HflFv_F`kcX9qhipUrXAo_^$A;~M_!mHwwsdUlP zA-- zD2Px9w0oHEVz~Z-w_wA~`Qb8d*R7&`4$>!f?lp}Y2`00=S;Pe0 zszk+`*qaMGo1x0mF|SwS`Y^8k8Q^C|-bcm=s=lv>1&%3GIo|25!$S4mvywEsR#J+( zzouFX{THldJ;wr$>{2o0h!?G?#!MTv${HQ{=emAwy=!0N9dhX)+j!7+7&?*@meez`xp)gSO_JEFuxCSEz@v zMo+l01Ul{ZM2ZnOf0jGSLlgpQ{B(NSgnq&+K?%N8Dng*GT`PfQB#DJ>cZpOyCP zR1e+H*FJM_#R~M0FS4>#B(B?*4o8q=0 zSn)-FsFuI5b}e2{;^vGSI6b)z#Pnurh|$2HYS%tf3FH?1>blL z^yJ?D?)wB;0$l6rT$o;t4P@%WVBp@iFO+lPC78m&pm+s1KZr>*NT3>OH-FR(F>xzIkPDM8Ne=kHjRCq#zxXbrT$#j?2Co9YP!uz1gQ zD*}969SPCRg-vwfGZ;k~pkAW>(W6nTk@uU7MpLP!4t;!`hXA>YAa##?Tu3IQ*b2&7 z($Wa~U&mYZcSWV1?L*3UyQ!3228yFxwMiPjN;I;-@97>6+a|SkI7JFe6lYgObm?h@ zzDN#{19S_{Is6*{{c5k(d(9!iAci$hvbidpYGPeQvV!T`W+^UyWnivu%=JiKSb!i_ z3B02w9N6M>%Dgi{{|-#w^~1f!10n%@7!pOW`ig1BWFSl~QVn^f&tKslrX)+`>LnkP zj7}hiLyq_~b8MOo-1^A9p4c36UqwV!G6ly6xuEXCw>?*Xs>3!h(MulWTT3|xyHg+t zlT;!`|4(3#$D?u9HvX|IU{#(-urYmzJ7PTOF>OH-vj;M_BiPX z78>?2*cqtz1nR$da$-AR-b{36rcX+$%x| z5%0wxU}5ZtxB__#MpmeWR9~fp(h9oxEPgblP)XbyazSs50iQ{ViE?^K{IvzKD;tAM z!5yusxRrwyR=U2M~EZmF6Mqm~8n+pyE>wd~J2msx{y zw>TFqO)SA@Im~;_u%q>2mwt5SF%riDJ7{SE*xiOMEJIHo98BK})vYz>Sn`sbNs3w< zyAN~khH(vJt`mMFxiTU>qkhnl| zHwN+Z+h<>!kM-7Hw52y3UN2v)mE)=NmTW*UVpKtyLGX@?7h7DAaX!Y(677(2H8GLO zQDVC-4qU*z9|5__`d_|AZs~#hT+BxT7}CtHu@DOHHtZY89sW2vlvt2Z%#FzbgV>*z zG1EAw*PisVLn>2Saph5=KKJSKj?6tY$nKg{b)-fn%JS2LtiRwR&+@~(HD(B3__GpL z&SBL6~9LW6=St+!nNm@Aa zG!bbEP983mF(H7KqfuW%iDs316=I|kHN!7SDAScD0by0@7zhnIioRpjr<65qu~}iO zAg|yF57T3sahiRkqQLUS2f8EdDs6q=hSoCT2fdBT6 zX?rnqkgCX^$F;ctQgQT#{X;XgOyY~XvF20lJV}K;)prEz8J<*>xM^aXri{)j#4kR| zhC@d+^5q(XRfePN_JX-6K9$CkTxq9TyD<(8RqxtI$h^{UndBt#;I)Vr9+LzNS!M>} z!H)608(E;UU4WU+83dmB zLP$MxW1fRK_h1PLXLP00;5(#mOE9}17)Jj=Sp|NzfGBXNG6R>Mcgj{A;PCzsnQ-`* z@FXnv6L0ODPc~9;=lYG>1Rg?M?-z5Caj5ke?DklLy z5COPp#2&+fE(gT#h!N?i&@Uqbk1{cNbuQTTC?|;`ke+ER=Fj#vTOiLWoaJYL?usmK|9vg|0zo#Hn6a zXhYj|8KDuq8k5zFjss!B61sF`hSV?N1l#UG$`*(`%ur2MUG`12%O{_cX?w8E4p7?b z%`*RGmOx12EBZYd&uHdB=$=EWN> zHLHRB7$R8gkg=MgU(v3u*frSLpBsS|M(6c4x; zTult|aj-)c)_h5#=kaGDr%T=!%w^7i3;X-(kYd`bEb{DeOIK1`#x+n%^U)}$8X15E zEl_aMZdSnHAudtNo2P-2l&k3j_ZM(9qhuPeDN{1&ZDC$%NGt7KkfS%1j?$r`9}~j% zTZgTmlR#7C9-f~onCfa>$>-R}OMrHaA+&pQwlf>Rgy5eZ=&#p8iV?YXlILhWx(c)Y z>a6TMc{c<+u$Ts9=B&7SJpZ>*;$wVRfALJ6x-~+C6NvrXKp8c8{+^Z7-SM}rJ;L(i zg-wH>3i)~q!Cxe3AoX5UV!Fu-X`$auo6fO@fW~BW0$YRg3$~0zo*eP#W6F}$yiY-# zY>r^=kH(JjS|9nl`)X{Q?U#pjME3^ zyRY=${8@N=Kts|7T}<*!YyId*75IyRBuNeEp&&m5LN3tLKB(IE^v(7WIvIz%ZmfV< zyY6k?1_I!?+jdR0Id1%I?J3Iot z-Zv)pA1*Y|l5Sw*STJ-2-_EZ;a-hpQrnT?hv+o|C;BED-=QcM!B(iuOF}+e}#mF;U zb4jVfwzOEOGro?IETkxc9o3MV9(-sRmtsHkp<<@e5y;M^!2(d zR-)tDvEZj&o((c4h$W^-^N#CGM6~ld-(u&8h#YMiwHIyO{+?!Xr}BF}wR(5^IF#8< 
zwtgwS=*lZdY3*_Kmu}vJ#m)uIy2@FGkH?V6IbHoJOeY)!_^xdC3=in8d%wM*YOLd4mU z+E0rkvOdrsNhy?Z6p~*a@9aC^UOn=#qltDzFPf@CXuE<&Lg>7I?p(S&FA-n<7)!4=6NcS6#MYN{^RE6~Ah=&PgOVDfr%{&N2G_^A~a zg6<8wqgB8y@N@cdAmx#2PP6K85b?VA73nt<4TsH>rLvaubpbaD;Yuo_tX=iBNKxe7 zI@7Q1KYid6_F_&$XWh@LQ=!lT!@XImJ$?Iu<0eu-2sUIj|TH^-1y-)LZi-1P{@h;FYQbP7xN8h zgKScaF|w@7D0#`V=y}HAhJQ48@#7L+;vuJNj1goUC$86&fpU5LGp zUS&AbAj#f>HEql>|2+XA@RDjq6zV}Y_KPW;e8?!;?xaTtjAf1IW2f-+NiD{ zZ=G-tdV@$W&U8jI#yCz%jr8i_^QD{23111ZEwRXXWxgHV4_-PdU&by4+>)=EBtHe| z*U*$3T$Wg{_FBMsS^nk)^H`=a!uEd9)(L|>&e4M+t*N!YG;T5B#JryNj2RLyg8bE* z(^)z_26|dCVYR)k7+Du8L@1R3Jr##yR&Xn?bUXRi^XFWp?96XyZk4r>9|5REVCsDk zr>a5cBobw-i>&5CCTiR?A)Fr^RWIz+C{rf2zPrpI%o#u=8oztXO*i|Mf>#g_&ZPBj z+LP3Q>6Q1pkVRE1KT?B&p@IpvBRu;@oPJf3VnLWE>pLi3W3<$})LavK3CALgveOpo zgO?4ax=U!!Dk7kFDHmch)q#gr{7vw>! zlo{L4QDa>kYNhiSi~hKaGsIg`5UE@k1}IHQVBsmz+ND zxrvgf&C{iKY6;%YeYZv;uytL{V5rW=#DXx}6^SOzeo~ABUz(@V8PGZ_R=y;ax&KX% z4ylElMb!;sW|?=arwJgCeon-GtR0Uaizt@s1;IQkBJ>*I4ovc|ZUifdS~e|x53pRA zeT3IPrQt^8+mC{+I<^+K2=F0^LdLjdE!~R8q5RKxJh1zx)0901US#!gzeDIZf!hY+oWjv;$}G+GZNrH5K%eT4^0+&N|t|C&;CXAL^UL2LV$Mwn*4EUR44isJ@HM{;C^# zGx|=Q$mdEg5*i#u@JnwD6SJ?AN^Eft&M~HCpUeoaS+UY$8*=;rg&Bo3C|uB}IxJ+? ziEs%!W@=bv2)7ok^gzrME!FbmBtqO!;*z4-bUM8OVWRlBVO5|pty}s9Te$A)jS6oK zEkmUWMQ0{*hn(x+R_l3B0xYT;kFC%`gvE7wDV}+(2Cuf@rLd}@%i;G;8M$1(V_KfQ zaH4oadr!Uw)SW;kJl@V(1{n>vTT(UAJU6H(cbvs`NGcnIyxSHTcf<4+-r9fS z=WBsncR)#c`L}#8!p#N^_Uj_WmL4!5qn*^B?hd|xL?;RzcNgH_uATraH1pa)EyM}J z;uuh232x;$kE$5#Xr<37&(!FH#yx@@|7qqwv&~fmK1&^ccliaJRlyhoZ}@VWUaz_6 zd4Oe|jQ=DZ1Wb!QHwbgkE3+#}Z7nxzOnfMt&_ypr zY*rFq?gUWiUzRWSi*IR5E}BiRE*UgZ z;1L7WI;fFEn=yuqxJ(&2QXl0J4`}ROnJMBQ=Mw~ASMiO&nJjgc+v@nL}c2Z$@BI-uf? zEKj{^SYv^BA0oI!E4s2vFOtn4hdTlB#&|EXqx`(`%hi@k0;u4v-J$iFW`74MBF}W@1$~L4Eh_M}hr-j)`%eN@^|&zG_?%7g%1R1jfA6M?aOF zD zC9tGzaTKA#jPjjeyNKQCxd{z62&|rJR214%sRnCaB7&oz3I6ua(MjhIEg@B`9DOq> z4Ad|=Uf*j2I0f7(X5Y>pGi{&TA{OzY!WT4Os=eZ--&FKO!URCY->qugAW3k`A0E2K#jH@cusK2f6RUts>rZ9rg*w7!}a@r+Idh78i>Cd14|n zAA4UPK1>mL1K?h%p*b7|x}6Z=ojc3^nX8~Sw6|R^M|R4U6V;-8nDlQbD&@cDzTZoi znwu7S+g?(+5C}2$#zeV75$!Cg`3!dRi`30-VDc!L9DZO?7bhr}r(2``;e8RA_s68> z7e8SP#E%;Wj^x~kA%3X~D8^ob9fF6mbhczx;3cxZ^}F?SQ;fJ`c$)8fof>~~uGq;q zR5y~_i5~fhm~G#w#T3=+6wynP{oohs?#w;R7xXmH8WZJ45De6K`YKv1@-d7V$(9kV z#mk-XFV~O5MHsK@JIplmT=`g;5_3Ki6%c-Tr-RA9i^ha#8Vv93uGIXo)Rsi&l=}ow zEl9~)0&p8kJztmpaOXR7lIf4x9GAcS@<4=qg zs#4#0LL@o}Z5+;NpFFi@vl`n{|Hc>t5+Cq@0*?z#KOIPGMX|WcZMjjBz=UA?z4?rx zOUAbQd4&qRO@-AQt)WGbfj(u>3YHaRmr?Yh|ufjt<0^`El|4t|R!R#W_vb{i-W-_mLl~#k2pz??z^l(XIxw|&W z@rUPjGzJV10rag?1HnB$@1aC5k@nuTf$W>tuE2tTD*_2xhOcts^~!z}EfyO;Z$KC| zw1Q%+?6D72eBQ`t?!jjRYQ!&u;!cAfzKg?16=c3GCcbasBT&1z#Ks-^{tc(FE3vr% zVWv~k7q$|v>z8S8yD{6od=kaNZ8@j5Li-7!b}#*LvNgGV%-p;ts+lB1G)J9*jzdEj zx003`S)olPb=iW$iUHN(PStY^XNL{DMJ5=PtB!qk~8O}J>fyrP6hHe{&-RZ4h|G)-H zDs@!%1v$YTl&Y`d-c4Qn7BrDJL#6=c72o$Ks)cUEXCA6FgF=Z1t9(1O-8OVam6YE=BGtGARLVG^r=SzcK%}$jXpB&=?1Oc5#-0Ey3i_ju2YU{w?@U z!eDF6mUo|Z^vb|M{d0}a=Hbm|HqaTGLvi@qJ#~j9Igmyo zs3e&u==%qW5jx^QN@s%S`E+m|d&3`pf2;M*8mFfhs*%y`_SAPE7PP(IA5eA3%7};@ z?z!3-HIi12y=pfPFVzAxxG&Ph@9$10)!*1U9AHG0O|tW7g886C1y^@unjiuwD57L| z=g1T>ROCd+ciUwRDb zr0D3mlw}AA+KGMX9lRm5p$4KlSl=U}_N8iiqIi7L6*hJ2e5sEVn(Cc91T!$rPRux! zs9%CXpv$_T{RodAw0cxtfi#gXy|CPG3~XK=6YHI(2u;ENb1{81?QdljA!IW-Bpf00 zs$UW3oo(A?sh^IMB#(L4rV}Kp7=#W>p4bdo><4q!66lXb><9Y+G97a#y$+%^eN;2J z7c|$dgm97SH+l?I_m_5>&Y_grz^Ezfv0=|A{ZsskP$Tc<$NPi(pxwBlDEEahMSluN zPas$TvW@Vi!T7TQi=t2~*B#q)=(?dzTsN3|e{maGTlLAC=G(P)Y{7qyePWNNvd!lJ zqilqCq(ba`u#OLy#Vn(YR>Da-O`wxoMJ~V)dNjfn_OQ!Tq#MSiONdRNGOTtgv}|8? 
z=sa`5wF1Hm^h~>npUEHdhd0*iyusA?Pz{xgk(`%OGv_*99m(XErFv026_;wx@9a#@ z_0o6#z(q6A%RjWFBfdQ|pplvw1B&5zMt=V0+s1Vow0pqEeG}1af1}D_ z1xM!xg@)7CtHQh%i;fm$Sc;DM*Ch&n<0%3)L*z4M?%sWn)LK-co^0uf&hZzEA6YU} z2QKJKP6?);-pBc`-M6L6S>;ICO70RMhE3>DsVvw;dxI4=r(D9!gD&x)G2Q%}AHf^SaK{Y{TK> z!;a#_Qf793E`#_c5y!0Te*fQUkoqLeppnZ$;cp-lhNJ_;iFEV)Rx?qF%4H>6^=#7;7Q6RNlyqkLN=ML^sRai1Mm-9c{baBRjlf z?=LsTY1$JXKcZCk&Wf}?I687lfcjW)Oafg@ta*!nu<=t;T-ae1D5vHN-E>P#yZmf7 zANjP;op4)x9<^lx3})8QkD%AUU6{`!v|b@IGWUf-myjKDGkewp^<@qrf0HOt&&Zfq z=ZB2zDkSy|T5(Iu{!t@+;d;t33iVtE&Est<5TEyAAI@v(sqW>LnN$ z>egVF1nyh&b#e8XC$mQiYdREdD_Y5FER_8%=ST~{&#eeSHZ#i9s_XY$v%t!)3jkid zEm^L9*7mMB>tgh;Kq32DIv1uW2UG&IZ^H#a`OUv+Zua|fP%+{-`r<8My{h}RFcH=$ zk`~Ge7zg&GM7SVF1w7@vpmy)BIpII=8YN|qTsXNdoGuUO2FKv?d_Uc<;z7%cB6s>U zlHf*d_<2p3Pv`2rEA~RkNt0#Iezk1zr`F{`+LEucp{gZUrv^WMf~`JwYRR;*k$Ylb z^~}vVo*q#lBO;vD{eosvD^@hMZth_Fv3=F6UcnB7pS9SLjQU1FofT< z?98rvYxj!;7Q3{LJR*d}KAPu)wwy)3vF}z8Hx~Pn(OUk##yb`)E&9S-raXP>I9qUN zuSE;|h(Lgnb)<52?jR{#?}zjfNoQkHeG?D^5t0h-N}i z2%9a;3(~Pk$3}c72hMPUsEzam6gly9z?zoK>KI+J6C%A6sS1vRejV)3<*jKj8gx(if## z2s`S&m_BY9o4&|;so3W0hBPg4iXPn!Ma+g_Vbm@51dsJ9YHhl8`bE}BBb7Pdx64d- zM+P#=FCOSb<1SW(fpeFAE9iwa^qPn>zTX>hg1f<%gjtF)kF~)joat3T8yv?(1u>or zZl{gnVCCxMdWqB^d9v2}>YJz~zijJ8{)k1;s+v7hT46#N_4m#GG}q0JBGpr9YfL>| z2Ig3XbrbMw^J(ZA`+%`=kcMQIg&9#0R8aYOEx_S50tw>$m{oqua@Mi)I4Cl3< zkSceH_U z{1GoWC^TnZq|BklpDx;vi6xugHiF?i=CTZ2ZQSJ}$DT_~Kmk0qq{pPUhSC8v3^p~~ zu)UM`yfZ<(MW-gCPZZabuqIAvY26-;{YQR48gCi<_sg*znyBTS82mhy9lI}H{cAmg_#|i=vGCZ$A{fW#%&ch*@Rl~w>S+!ObvS-Td5QsvdK{DZ#eKt{>JD&<$XI-)VwtfxlTj?fB(-JsIx%0RyayvsxuGUvoCS3?j*P(8(u*f1l2#r4E zKQn5)2m0<(js6%V?B)k=G}s#%Z2jy?eJosiO+sSekU2ilaCB$`;kRAvH~uUqwt)_2 zS{>}lg_`W}(AM&H5J@d70-k*`AlIaSm0|g$`)bu&Y-@WBCn$cCeacDY0Q#q=z=ztv z?#U5$mCculsJMS;?w$?MwC`k|m#rppMK=ghz&D-2zb@F>;aN4}myy96J(NjixwUST zkf(2KDSATgKP{**R9+KKx|bHA2tXd^y|&ztZV19o5?hA^;>{N`I$Z7%loZ+sySEvd z$dfgCk%VasjCRd(E%x4)4cAYs5oEYYMp6)yu77FfTX3Pl094G$wd6HbsOu!dk8wsc zmZ(s_XzF|!C~oN5gSfd~$S5=QvVgnfS;4T~*QCg1q@EOm`GXnDWm8EfD=W#qj?S*q zQxmca?Fhc=XdS*Tupnw5qbJZXj7XHepA@rm;GsU+uUX%_@lXJj_pD)zTO*w5T-Ltr zvm)KS2hVVmt=J2~zn=H6{0aTVWU-(OCYn5FjZ~IR1m{V$$KX{~h{^l_e2JHHZq*BJ ze?V3e`za1Y%f(`LB zEIJhE#Mh6;tW6~!rmL#(5EtKIs7pgHUD7vF38LuptqFBjVZ(I`_D?4LMt=hG*<+C` z0`y{`RSSvoVEQPIDd7|F!Qx#fo6HX1YYc@KD8CN9;w_?mKeh~V0&PE?AZ_YS_deJ@ z(AGe%r3jiZwh72|P+LJFR?ItkgrfS}5p_-Tv^gT!4Ijo^P-k-wPAfTUB}Rl?68ITy z{Sd%Vw7&)ymV7~p<{={bTxM6lb~X6kB$h4AXU1>(s%gspuQY%MZl*DP_C{+OkmMjQ7yt%Clj zhrijK;eMA*xIGnI1aipWP! 
z4tCCk7whi9Lp_?|Q4h9;cJcM1+hZXlA`BTxKY?#uoNm2)XOSh7D%Mng-L(f1uAc9Z zVIsDhN!@74vkfYHk%;($Zb3HP-@nYj3pMFSV!)ri|5Xl7}k0YNQ%i!;!x!5My!V&6-+jIhfQ;$*RfugH|Y&mBhRZ9n+-O z_U3R#fBJ8kfA>JN99FZJ929D#dH_9Sm+*YYHjZb8u%!_5na&FdTEzRcG&*rN?o#X7 zS|cW{3%!8}67n4`MM-+qCE_`9n`H-L=~4PPaBe zZ%FY|*ejU6kiuUtNx#>&)Wcx}#`C*W#xyx}cjf^39LM=2)&h&^&wHNUO|-dcgTzY| zzX(DaT(P)UF9_HFf-g(%o;Xqo7_!b=aR(a!PZ(>ywJeAf6DY06hyht2V3_q4Zcs=UvShy7Qt4v;hX!ciVaxwM0m*NuC41LSis4f zLod;RY^ulVVe8b?UY5xlR$qc6t&sYBK~|!Y6_GBvVc=$bOl_)(i&#(Uw)g$q&}*wM z=WZ&xhlont#*v=MYaa>XB;fjR`H8&3~ z15Rqi8ui@}?RmyFI}s!Lq{Bf@{c(|PFFCPP@QyCCU1^9BlP}M-IE_??z(4JY>=yMR zC4}m8rLC4v-WxBIoo3Uc>HV&t6^EArSGn2ih(endc9n5BG0WVe?_D-SNgl3p_uLQ* zO*x{x4I2-V!Mc-CuA2Rtw&*M-O4c#ph_-%8UOC$zo7!JhpGFwEcnJ*SIv9dIT|#&h zHIE<{7;+k$?7o!%4FV{8SP>*i*0UIJ$%8u8uO8Z+alYMC!%;a1tHpCXXC;hNAWNdk zk26yrv3GAy8N6?E*x`w4SJoRxck}AaWy^?7OyfEhu>3%z@hY)+>y6sB4+2`Md?RKK z7{=-glt-rDp}%7;qHG3AFY}{kZgG5pMnB$3ig#32p~Ui|q$Re&CUK)oPbw(JfJ`RC z`@4hhPgX!=m;dqP__uxS?AJoaAqIU(7rr|?qp42*UXr2h zJ==CjU9ML?b!93A2+dv$N;6bn8kCwJx*TAHtACsRuYse1R~TPA$0!~^2UQ6B$yO7p z?Fkfmy`1F_1dP?C`?%aCX<6J7sHjpS%R6)2k6>HCduGoMEGOl2Rp*v5F9E2MC6x?k zP?rZXyxa%F-*lH$yB3~A?z001XaU@xsSov+8O02s=+*}Mwe<&09zg4DD%cn}!=^K; z3CuOHEPPDEHqh-ib8e4@hAZXudDY+*sj_DvGr9&Uy%>Ie(`4nC4mDA1QW|(hG;$t7@zaEFPYFerlQ!gm2DwI=@{cHBLn^9 zJhlXdj6#(Jij?ziqy@srr^BpXV>^iqW`JB^#Oaw#kJa4mBD(CRM6pLRE0iZ1YMmGj zr(DjbX((d*0=9wd70c}yS#0Naz%2I0i$K;0ZkaTHkES0<8tF()J^iki!HzKjvh?j8 zTKFwUPGNa*gq&WqKKRz*>7}Y78A9J5=D`8nU_TRIVe@i~lSnbv*pT0AgJXLJD=p>9%`vCEXHgrY%_yLbX_l~bN}b?vTexz z4QmubA{>L{32@|eKg>tuMzm%yBm;j4;0kS)b*1A<(7bxmtpN7N%o_ztmyK#LVuOj; z2$9B$Tv-d%3#_woGLd&@sK);VIY7q0c$m^c>SfhKcRf_*p*u+{aI)IaAW5;b!x0Ob z>I4hg^cPv!SQ0gpaA0=UFHAu1Fjs9HBJU5S6`=O2dVrBaE}1)5c$J#99(5^HRE&E^ zed=Qce|`(^IG&Un%O(5yDfE!Zlc@eI0ZUn3neS@{=jil_1kAJ+=y5n2(*`zu_m-q- zApX+Ffv{7mQ_CsDC0vO=cS*!XuWr%&yTGiF8NR3Ob&PuU#SdenSgtOJQytf?mBE;c zhZm?LIX+3!4+Rb}9B_^t?<59yR~%)(LpILOf!K*VTi1kP-C$=t>h^cQ4MR{Z7Z(99J`*Y zF3PT$>6nQXCTCigVvsJsBvi!`s4%(iIDVLCMTLOK#E#@;8qw zK%XHYQjJ2tCm4YHMu}}Y$00-C1484hJ(S4i5CCjiimvlo8p4BgFgj)mi9ln6EY;cf zIqSVJEfn&rU4!mB5`NP3q7RW2ggo8xlPV^*nWF{I4DLvfKMhsS=L4u807(QVxKGSe z$;s5=cC`(;gF(m|W{>I{?Tlsx@84iDRkf%pYTu~9UG4*<1qDWUC`Q90_b1gxv(+0o zq6irfuAPp-tO~Z1K|30^qnaQv`Pox5$gu|AK-VW8pEo~q90bdJ1|}L*PODr5z)}a0 zW>pMr$gki$i2gyv$k@4$7=O-DtB-OE$w2#C4fxo|6z1j`ijVS}9{$SzF_Q4{BM-}$ z1`C4ZeZhd&+Ex%^m=e5w&$-5hLU#Xjv31s*QCT4R&!7Sr%FCZynkL)>Lb6f};^4Pb zJ1snLin-4*!iHc~b9!cSWBc`pnNQtywT2Ss%>rjdG!q~^Xh345oHtT2?!DVFfbAp?=K4P)&F{1rrbMKBA^8>MSgr z?4VWJR1I6ET@#uEph0Bhw(-@9E$!Q&Gx2%(oWLE+J90y_ zTsrnZtMs*3w9SWtYmT#v73yv@I`@wMCPQ2X;{BCH)Aomp%Kfsvl zin>&uKW}O|eTN~w_G;iP;Zeylx7sy|*63X5f zThoirE@aM1zfv5&tT)2ENuJT8)*Ojkz|a{7Fh!K8!u+nDC$Ces24Pc`kV9 z9h~4yTs95RD4w8!F6?=$uB<=jKfFB$CqWb%?3%u^z2WwSq7IIQimNUl#KWS7-}MF` zG>nup@)J{CLk_v!cm>;;E_?5&d!8wfsBd??Joa>`IF{xqJo*5TC&b;L>{iqAw;VXS z61WbsD-nXDQ^T0|LWfs;`>{e9irtEuT-$x4u-MQl!8Qk zdH~eF5GcZ$-tt!i-FxQOTj^YJD#|~_hmtg{VfcnNoWJneD?*|8s`NfnOiF2gdT_>3 zvOL2JR5|3Y8V(aFc07utzGu(MDp-;>R1HpU{I#$zLr42cDLjrfPs(*^?aXvY$X$xF z4Nca?d;h~WVo$o#m=$5EvA)8=-7g^~AZ(YjQ4DmsUE0g679hOl)`9ALn-Z`~ z#fm3F@ZSZ&JR1C2VKXFq*h1X4Q?nxDsR!g(1ye=!DnX=E<|M|-?f8zW+L%T6)C9;F zeHDu=v^2z4FX|7A9fP{a6XlB%pHH{}>OR48EhFfX1COcu3>v8+UY_-zGc-<+85~D1Jni%>V{*7d}X2 zyux)6fr*uAtZMmuCD+qb^a0-V%ik;1;891d-gZpn=4JACxb0i8ZT6AEQ3l#N!u56Z zsd@hd@m*V8r$Ax;#jt?I4&4a=ZUzeB#%U=2B^1G$kP9w=GbjlF(k(GsN91iAq5>|G z!2ZlDShA0WP%~|zOP;}JKdS`0dQvTVBH$f`?T;K*2#0)MW~W?sk+&+;LcM0wHaQGC zrHFg9&ui$WiWk{2b}MilCoF9=-@2UIU=gL${Gv?>3HFQaW+{xPZ!i+q{(}QvJoS!E zECTL;UXLU@(*mIGGU;z}5Y8lL6XWg*nuioiqBDTj(!;_Xnm~jShpt}FKEoU^k*+}} 
[GIT binary patch data (base85-encoded binary delta) omitted]

- The CherryPy Homepage
+ The CherryPy Homepage
  The Python Homepage

@@ -77,7 +77,7 @@ class ExtraLinksPage:

    [Return to links page]

    ''' diff --git a/lib/inflect/__init__.py b/lib/inflect/__init__.py new file mode 100644 index 00000000..78d2e33c --- /dev/null +++ b/lib/inflect/__init__.py @@ -0,0 +1,3991 @@ +""" +inflect: english language inflection + - correctly generate plurals, ordinals, indefinite articles + - convert numbers to words + +Copyright (C) 2010 Paul Dyson + +Based upon the Perl module +`Lingua::EN::Inflect `_. + +methods: + classical inflect + plural plural_noun plural_verb plural_adj singular_noun no num a an + compare compare_nouns compare_verbs compare_adjs + present_participle + ordinal + number_to_words + join + defnoun defverb defadj defa defan + +INFLECTIONS: + classical inflect + plural plural_noun plural_verb plural_adj singular_noun compare + no num a an present_participle + +PLURALS: + classical inflect + plural plural_noun plural_verb plural_adj singular_noun no num + compare compare_nouns compare_verbs compare_adjs + +COMPARISONS: + classical + compare compare_nouns compare_verbs compare_adjs + +ARTICLES: + classical inflect num a an + +NUMERICAL: + ordinal number_to_words + +USER_DEFINED: + defnoun defverb defadj defa defan + +Exceptions: + UnknownClassicalModeError + BadNumValueError + BadChunkingOptionError + NumOutOfRangeError + BadUserDefinedPatternError + BadRcFileError + BadGenderError + +""" + +import ast +import re +import functools +import collections +import contextlib +from typing import ( + Dict, + Union, + Optional, + Iterable, + List, + Match, + Tuple, + Callable, + Sequence, + cast, + Any, +) +from numbers import Number + + +from pydantic import Field, validate_arguments +from pydantic.typing import Annotated + + +class UnknownClassicalModeError(Exception): + pass + + +class BadNumValueError(Exception): + pass + + +class BadChunkingOptionError(Exception): + pass + + +class NumOutOfRangeError(Exception): + pass + + +class BadUserDefinedPatternError(Exception): + pass + + +class BadRcFileError(Exception): + pass + + +class BadGenderError(Exception): + pass + + +STDOUT_ON = False + + +def print3(txt: str) -> None: + if STDOUT_ON: + print(txt) + + +def enclose(s: str) -> str: + return f"(?:{s})" + + +def joinstem(cutpoint: Optional[int] = 0, words: Optional[Iterable[str]] = None) -> str: + """ + Join stem of each word in words into a string for regex. + + Each word is truncated at cutpoint. + + Cutpoint is usually negative indicating the number of letters to remove + from the end of each word. + + >>> joinstem(-2, ["ephemeris", "iris", ".*itis"]) + '(?:ephemer|ir|.*it)' + + >>> joinstem(None, ["ephemeris"]) + '(?:ephemeris)' + + >>> joinstem(5, None) + '(?:)' + """ + return enclose("|".join(w[:cutpoint] for w in words or [])) + + +def bysize(words: Iterable[str]) -> Dict[int, set]: + """ + From a list of words, return a dict of sets sorted by word length. 
+ + >>> words = ['ant', 'cat', 'dog', 'pig', 'frog', 'goat', 'horse', 'elephant'] + >>> ret = bysize(words) + >>> sorted(ret[3]) + ['ant', 'cat', 'dog', 'pig'] + >>> ret[5] + {'horse'} + """ + res: Dict[int, set] = collections.defaultdict(set) + for w in words: + res[len(w)].add(w) + return res + + +def make_pl_si_lists( + lst: Iterable[str], + plending: str, + siendingsize: Optional[int], + dojoinstem: bool = True, +): + """ + given a list of singular words: lst + + an ending to append to make the plural: plending + + the number of characters to remove from the singular + before appending plending: siendingsize + + a flag whether to create a joinstem: dojoinstem + + return: + a list of pluralised words: si_list (called si because this is what you need to + look for to make the singular) + + the pluralised words as a dict of sets sorted by word length: si_bysize + the singular words as a dict of sets sorted by word length: pl_bysize + if dojoinstem is True: a regular expression that matches any of the stems: stem + """ + if siendingsize is not None: + siendingsize = -siendingsize + si_list = [w[:siendingsize] + plending for w in lst] + pl_bysize = bysize(lst) + si_bysize = bysize(si_list) + if dojoinstem: + stem = joinstem(siendingsize, lst) + return si_list, si_bysize, pl_bysize, stem + else: + return si_list, si_bysize, pl_bysize + + +# 1. PLURALS + +pl_sb_irregular_s = { + "corpus": "corpuses|corpora", + "opus": "opuses|opera", + "genus": "genera", + "mythos": "mythoi", + "penis": "penises|penes", + "testis": "testes", + "atlas": "atlases|atlantes", + "yes": "yeses", +} + +pl_sb_irregular = { + "child": "children", + "chili": "chilis|chilies", + "brother": "brothers|brethren", + "infinity": "infinities|infinity", + "loaf": "loaves", + "lore": "lores|lore", + "hoof": "hoofs|hooves", + "beef": "beefs|beeves", + "thief": "thiefs|thieves", + "money": "monies", + "mongoose": "mongooses", + "ox": "oxen", + "cow": "cows|kine", + "graffito": "graffiti", + "octopus": "octopuses|octopodes", + "genie": "genies|genii", + "ganglion": "ganglions|ganglia", + "trilby": "trilbys", + "turf": "turfs|turves", + "numen": "numina", + "atman": "atmas", + "occiput": "occiputs|occipita", + "sabretooth": "sabretooths", + "sabertooth": "sabertooths", + "lowlife": "lowlifes", + "flatfoot": "flatfoots", + "tenderfoot": "tenderfoots", + "romany": "romanies", + "jerry": "jerries", + "mary": "maries", + "talouse": "talouses", + "rom": "roma", + "carmen": "carmina", +} + +pl_sb_irregular.update(pl_sb_irregular_s) +# pl_sb_irregular_keys = enclose('|'.join(pl_sb_irregular.keys())) + +pl_sb_irregular_caps = { + "Romany": "Romanies", + "Jerry": "Jerrys", + "Mary": "Marys", + "Rom": "Roma", +} + +pl_sb_irregular_compound = {"prima donna": "prima donnas|prime donne"} + +si_sb_irregular = {v: k for (k, v) in pl_sb_irregular.items()} +for k in list(si_sb_irregular): + if "|" in k: + k1, k2 = k.split("|") + si_sb_irregular[k1] = si_sb_irregular[k2] = si_sb_irregular[k] + del si_sb_irregular[k] +si_sb_irregular_caps = {v: k for (k, v) in pl_sb_irregular_caps.items()} +si_sb_irregular_compound = {v: k for (k, v) in pl_sb_irregular_compound.items()} +for k in list(si_sb_irregular_compound): + if "|" in k: + k1, k2 = k.split("|") + si_sb_irregular_compound[k1] = si_sb_irregular_compound[ + k2 + ] = si_sb_irregular_compound[k] + del si_sb_irregular_compound[k] + +# si_sb_irregular_keys = enclose('|'.join(si_sb_irregular.keys())) + +# Z's that don't double + +pl_sb_z_zes_list = ("quartz", "topaz") +pl_sb_z_zes_bysize = 
bysize(pl_sb_z_zes_list) + +pl_sb_ze_zes_list = ("snooze",) +pl_sb_ze_zes_bysize = bysize(pl_sb_ze_zes_list) + + +# CLASSICAL "..is" -> "..ides" + +pl_sb_C_is_ides_complete = [ + # GENERAL WORDS... + "ephemeris", + "iris", + "clitoris", + "chrysalis", + "epididymis", +] + +pl_sb_C_is_ides_endings = [ + # INFLAMATIONS... + "itis" +] + +pl_sb_C_is_ides = joinstem( + -2, pl_sb_C_is_ides_complete + [f".*{w}" for w in pl_sb_C_is_ides_endings] +) + +pl_sb_C_is_ides_list = pl_sb_C_is_ides_complete + pl_sb_C_is_ides_endings + +( + si_sb_C_is_ides_list, + si_sb_C_is_ides_bysize, + pl_sb_C_is_ides_bysize, +) = make_pl_si_lists(pl_sb_C_is_ides_list, "ides", 2, dojoinstem=False) + + +# CLASSICAL "..a" -> "..ata" + +pl_sb_C_a_ata_list = ( + "anathema", + "bema", + "carcinoma", + "charisma", + "diploma", + "dogma", + "drama", + "edema", + "enema", + "enigma", + "lemma", + "lymphoma", + "magma", + "melisma", + "miasma", + "oedema", + "sarcoma", + "schema", + "soma", + "stigma", + "stoma", + "trauma", + "gumma", + "pragma", +) + +( + si_sb_C_a_ata_list, + si_sb_C_a_ata_bysize, + pl_sb_C_a_ata_bysize, + pl_sb_C_a_ata, +) = make_pl_si_lists(pl_sb_C_a_ata_list, "ata", 1) + +# UNCONDITIONAL "..a" -> "..ae" + +pl_sb_U_a_ae_list = ( + "alumna", + "alga", + "vertebra", + "persona", + "vita", +) +( + si_sb_U_a_ae_list, + si_sb_U_a_ae_bysize, + pl_sb_U_a_ae_bysize, + pl_sb_U_a_ae, +) = make_pl_si_lists(pl_sb_U_a_ae_list, "e", None) + +# CLASSICAL "..a" -> "..ae" + +pl_sb_C_a_ae_list = ( + "amoeba", + "antenna", + "formula", + "hyperbola", + "medusa", + "nebula", + "parabola", + "abscissa", + "hydra", + "nova", + "lacuna", + "aurora", + "umbra", + "flora", + "fauna", +) +( + si_sb_C_a_ae_list, + si_sb_C_a_ae_bysize, + pl_sb_C_a_ae_bysize, + pl_sb_C_a_ae, +) = make_pl_si_lists(pl_sb_C_a_ae_list, "e", None) + + +# CLASSICAL "..en" -> "..ina" + +pl_sb_C_en_ina_list = ("stamen", "foramen", "lumen") + +( + si_sb_C_en_ina_list, + si_sb_C_en_ina_bysize, + pl_sb_C_en_ina_bysize, + pl_sb_C_en_ina, +) = make_pl_si_lists(pl_sb_C_en_ina_list, "ina", 2) + + +# UNCONDITIONAL "..um" -> "..a" + +pl_sb_U_um_a_list = ( + "bacterium", + "agendum", + "desideratum", + "erratum", + "stratum", + "datum", + "ovum", + "extremum", + "candelabrum", +) +( + si_sb_U_um_a_list, + si_sb_U_um_a_bysize, + pl_sb_U_um_a_bysize, + pl_sb_U_um_a, +) = make_pl_si_lists(pl_sb_U_um_a_list, "a", 2) + +# CLASSICAL "..um" -> "..a" + +pl_sb_C_um_a_list = ( + "maximum", + "minimum", + "momentum", + "optimum", + "quantum", + "cranium", + "curriculum", + "dictum", + "phylum", + "aquarium", + "compendium", + "emporium", + "encomium", + "gymnasium", + "honorarium", + "interregnum", + "lustrum", + "memorandum", + "millennium", + "rostrum", + "spectrum", + "speculum", + "stadium", + "trapezium", + "ultimatum", + "medium", + "vacuum", + "velum", + "consortium", + "arboretum", +) + +( + si_sb_C_um_a_list, + si_sb_C_um_a_bysize, + pl_sb_C_um_a_bysize, + pl_sb_C_um_a, +) = make_pl_si_lists(pl_sb_C_um_a_list, "a", 2) + + +# UNCONDITIONAL "..us" -> "i" + +pl_sb_U_us_i_list = ( + "alumnus", + "alveolus", + "bacillus", + "bronchus", + "locus", + "nucleus", + "stimulus", + "meniscus", + "sarcophagus", +) +( + si_sb_U_us_i_list, + si_sb_U_us_i_bysize, + pl_sb_U_us_i_bysize, + pl_sb_U_us_i, +) = make_pl_si_lists(pl_sb_U_us_i_list, "i", 2) + +# CLASSICAL "..us" -> "..i" + +pl_sb_C_us_i_list = ( + "focus", + "radius", + "genius", + "incubus", + "succubus", + "nimbus", + "fungus", + "nucleolus", + "stylus", + "torus", + "umbilicus", + "uterus", + "hippopotamus", + "cactus", +) + 
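[Editor's sketch, not part of the upstream file: the make_pl_si_lists helper defined earlier builds every one of these table groups. For the "..us" -> "..i" nouns just defined, the call that follows computes roughly this; the three-word list is trimmed for illustration.]

    # Illustrative only; mirrors make_pl_si_lists(pl_sb_C_us_i_list, "i", 2)
    # and assumes the helpers defined earlier in this module.
    si_list, si_bysize, pl_bysize, stem = make_pl_si_lists(
        ("focus", "radius", "cactus"), "i", 2
    )
    # si_list   == ['foci', 'radii', 'cacti']              plural spellings to recognise
    # pl_bysize == {5: {'focus'}, 6: {'radius', 'cactus'}} singulars grouped by length
    # stem      == '(?:foc|radi|cact)'                     regex fragment for the stems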
+( + si_sb_C_us_i_list, + si_sb_C_us_i_bysize, + pl_sb_C_us_i_bysize, + pl_sb_C_us_i, +) = make_pl_si_lists(pl_sb_C_us_i_list, "i", 2) + + +# CLASSICAL "..us" -> "..us" (ASSIMILATED 4TH DECLENSION LATIN NOUNS) + +pl_sb_C_us_us = ( + "status", + "apparatus", + "prospectus", + "sinus", + "hiatus", + "impetus", + "plexus", +) +pl_sb_C_us_us_bysize = bysize(pl_sb_C_us_us) + +# UNCONDITIONAL "..on" -> "a" + +pl_sb_U_on_a_list = ( + "criterion", + "perihelion", + "aphelion", + "phenomenon", + "prolegomenon", + "noumenon", + "organon", + "asyndeton", + "hyperbaton", +) +( + si_sb_U_on_a_list, + si_sb_U_on_a_bysize, + pl_sb_U_on_a_bysize, + pl_sb_U_on_a, +) = make_pl_si_lists(pl_sb_U_on_a_list, "a", 2) + +# CLASSICAL "..on" -> "..a" + +pl_sb_C_on_a_list = ("oxymoron",) + +( + si_sb_C_on_a_list, + si_sb_C_on_a_bysize, + pl_sb_C_on_a_bysize, + pl_sb_C_on_a, +) = make_pl_si_lists(pl_sb_C_on_a_list, "a", 2) + + +# CLASSICAL "..o" -> "..i" (BUT NORMALLY -> "..os") + +pl_sb_C_o_i = [ + "solo", + "soprano", + "basso", + "alto", + "contralto", + "tempo", + "piano", + "virtuoso", +] # list not tuple so can concat for pl_sb_U_o_os + +pl_sb_C_o_i_bysize = bysize(pl_sb_C_o_i) +si_sb_C_o_i_bysize = bysize([f"{w[:-1]}i" for w in pl_sb_C_o_i]) + +pl_sb_C_o_i_stems = joinstem(-1, pl_sb_C_o_i) + +# ALWAYS "..o" -> "..os" + +pl_sb_U_o_os_complete = {"ado", "ISO", "NATO", "NCO", "NGO", "oto"} +si_sb_U_o_os_complete = {f"{w}s" for w in pl_sb_U_o_os_complete} + + +pl_sb_U_o_os_endings = [ + "aficionado", + "aggro", + "albino", + "allegro", + "ammo", + "Antananarivo", + "archipelago", + "armadillo", + "auto", + "avocado", + "Bamako", + "Barquisimeto", + "bimbo", + "bingo", + "Biro", + "bolero", + "Bolzano", + "bongo", + "Boto", + "burro", + "Cairo", + "canto", + "cappuccino", + "casino", + "cello", + "Chicago", + "Chimango", + "cilantro", + "cochito", + "coco", + "Colombo", + "Colorado", + "commando", + "concertino", + "contango", + "credo", + "crescendo", + "cyano", + "demo", + "ditto", + "Draco", + "dynamo", + "embryo", + "Esperanto", + "espresso", + "euro", + "falsetto", + "Faro", + "fiasco", + "Filipino", + "flamenco", + "furioso", + "generalissimo", + "Gestapo", + "ghetto", + "gigolo", + "gizmo", + "Greensboro", + "gringo", + "Guaiabero", + "guano", + "gumbo", + "gyro", + "hairdo", + "hippo", + "Idaho", + "impetigo", + "inferno", + "info", + "intermezzo", + "intertrigo", + "Iquico", + "jumbo", + "junto", + "Kakapo", + "kilo", + "Kinkimavo", + "Kokako", + "Kosovo", + "Lesotho", + "libero", + "libido", + "libretto", + "lido", + "Lilo", + "limbo", + "limo", + "lineno", + "lingo", + "lino", + "livedo", + "loco", + "logo", + "lumbago", + "macho", + "macro", + "mafioso", + "magneto", + "magnifico", + "Majuro", + "Malabo", + "manifesto", + "Maputo", + "Maracaibo", + "medico", + "memo", + "metro", + "Mexico", + "micro", + "Milano", + "Monaco", + "mono", + "Montenegro", + "Morocco", + "Muqdisho", + "myo", + "neutrino", + "Ningbo", + "octavo", + "oregano", + "Orinoco", + "Orlando", + "Oslo", + "panto", + "Paramaribo", + "Pardusco", + "pedalo", + "photo", + "pimento", + "pinto", + "pleco", + "Pluto", + "pogo", + "polo", + "poncho", + "Porto-Novo", + "Porto", + "pro", + "psycho", + "pueblo", + "quarto", + "Quito", + "repo", + "rhino", + "risotto", + "rococo", + "rondo", + "Sacramento", + "saddo", + "sago", + "salvo", + "Santiago", + "Sapporo", + "Sarajevo", + "scherzando", + "scherzo", + "silo", + "sirocco", + "sombrero", + "staccato", + "sterno", + "stucco", + "stylo", + "sumo", + "Taiko", + "techno", + "terrazzo", + 
"testudo", + "timpano", + "tiro", + "tobacco", + "Togo", + "Tokyo", + "torero", + "Torino", + "Toronto", + "torso", + "tremolo", + "typo", + "tyro", + "ufo", + "UNESCO", + "vaquero", + "vermicello", + "verso", + "vibrato", + "violoncello", + "Virgo", + "weirdo", + "WHO", + "WTO", + "Yamoussoukro", + "yo-yo", + "zero", + "Zibo", +] + pl_sb_C_o_i + +pl_sb_U_o_os_bysize = bysize(pl_sb_U_o_os_endings) +si_sb_U_o_os_bysize = bysize([f"{w}s" for w in pl_sb_U_o_os_endings]) + + +# UNCONDITIONAL "..ch" -> "..chs" + +pl_sb_U_ch_chs_list = ("czech", "eunuch", "stomach") + +( + si_sb_U_ch_chs_list, + si_sb_U_ch_chs_bysize, + pl_sb_U_ch_chs_bysize, + pl_sb_U_ch_chs, +) = make_pl_si_lists(pl_sb_U_ch_chs_list, "s", None) + + +# UNCONDITIONAL "..[ei]x" -> "..ices" + +pl_sb_U_ex_ices_list = ("codex", "murex", "silex") +( + si_sb_U_ex_ices_list, + si_sb_U_ex_ices_bysize, + pl_sb_U_ex_ices_bysize, + pl_sb_U_ex_ices, +) = make_pl_si_lists(pl_sb_U_ex_ices_list, "ices", 2) + +pl_sb_U_ix_ices_list = ("radix", "helix") +( + si_sb_U_ix_ices_list, + si_sb_U_ix_ices_bysize, + pl_sb_U_ix_ices_bysize, + pl_sb_U_ix_ices, +) = make_pl_si_lists(pl_sb_U_ix_ices_list, "ices", 2) + +# CLASSICAL "..[ei]x" -> "..ices" + +pl_sb_C_ex_ices_list = ( + "vortex", + "vertex", + "cortex", + "latex", + "pontifex", + "apex", + "index", + "simplex", +) + +( + si_sb_C_ex_ices_list, + si_sb_C_ex_ices_bysize, + pl_sb_C_ex_ices_bysize, + pl_sb_C_ex_ices, +) = make_pl_si_lists(pl_sb_C_ex_ices_list, "ices", 2) + + +pl_sb_C_ix_ices_list = ("appendix",) + +( + si_sb_C_ix_ices_list, + si_sb_C_ix_ices_bysize, + pl_sb_C_ix_ices_bysize, + pl_sb_C_ix_ices, +) = make_pl_si_lists(pl_sb_C_ix_ices_list, "ices", 2) + + +# ARABIC: ".." -> "..i" + +pl_sb_C_i_list = ("afrit", "afreet", "efreet") + +(si_sb_C_i_list, si_sb_C_i_bysize, pl_sb_C_i_bysize, pl_sb_C_i) = make_pl_si_lists( + pl_sb_C_i_list, "i", None +) + + +# HEBREW: ".." -> "..im" + +pl_sb_C_im_list = ("goy", "seraph", "cherub") + +(si_sb_C_im_list, si_sb_C_im_bysize, pl_sb_C_im_bysize, pl_sb_C_im) = make_pl_si_lists( + pl_sb_C_im_list, "im", None +) + + +# UNCONDITIONAL "..man" -> "..mans" + +pl_sb_U_man_mans_list = """ + ataman caiman cayman ceriman + desman dolman farman harman hetman + human leman ottoman shaman talisman +""".split() +pl_sb_U_man_mans_caps_list = """ + Alabaman Bahaman Burman German + Hiroshiman Liman Nakayaman Norman Oklahoman + Panaman Roman Selman Sonaman Tacoman Yakiman + Yokohaman Yuman +""".split() + +( + si_sb_U_man_mans_list, + si_sb_U_man_mans_bysize, + pl_sb_U_man_mans_bysize, +) = make_pl_si_lists(pl_sb_U_man_mans_list, "s", None, dojoinstem=False) +( + si_sb_U_man_mans_caps_list, + si_sb_U_man_mans_caps_bysize, + pl_sb_U_man_mans_caps_bysize, +) = make_pl_si_lists(pl_sb_U_man_mans_caps_list, "s", None, dojoinstem=False) + +# UNCONDITIONAL "..louse" -> "..lice" +pl_sb_U_louse_lice_list = ("booklouse", "grapelouse", "louse", "woodlouse") + +( + si_sb_U_louse_lice_list, + si_sb_U_louse_lice_bysize, + pl_sb_U_louse_lice_bysize, +) = make_pl_si_lists(pl_sb_U_louse_lice_list, "lice", 5, dojoinstem=False) + +pl_sb_uninflected_s_complete = [ + # PAIRS OR GROUPS SUBSUMED TO A SINGULAR... + "breeches", + "britches", + "pajamas", + "pyjamas", + "clippers", + "gallows", + "hijinks", + "headquarters", + "pliers", + "scissors", + "testes", + "herpes", + "pincers", + "shears", + "proceedings", + "trousers", + # UNASSIMILATED LATIN 4th DECLENSION + "cantus", + "coitus", + "nexus", + # RECENT IMPORTS... 
+ "contretemps", + "corps", + "debris", + "siemens", + # DISEASES + "mumps", + # MISCELLANEOUS OTHERS... + "diabetes", + "jackanapes", + "series", + "species", + "subspecies", + "rabies", + "chassis", + "innings", + "news", + "mews", + "haggis", +] + +pl_sb_uninflected_s_endings = [ + # RECENT IMPORTS... + "ois", + # DISEASES + "measles", +] + +pl_sb_uninflected_s = pl_sb_uninflected_s_complete + [ + f".*{w}" for w in pl_sb_uninflected_s_endings +] + +pl_sb_uninflected_herd = ( + # DON'T INFLECT IN CLASSICAL MODE, OTHERWISE NORMAL INFLECTION + "wildebeest", + "swine", + "eland", + "bison", + "buffalo", + "cattle", + "elk", + "rhinoceros", + "zucchini", + "caribou", + "dace", + "grouse", + "guinea fowl", + "guinea-fowl", + "haddock", + "hake", + "halibut", + "herring", + "mackerel", + "pickerel", + "pike", + "roe", + "seed", + "shad", + "snipe", + "teal", + "turbot", + "water fowl", + "water-fowl", +) + +pl_sb_uninflected_complete = [ + # SOME FISH AND HERD ANIMALS + "tuna", + "salmon", + "mackerel", + "trout", + "bream", + "sea-bass", + "sea bass", + "carp", + "cod", + "flounder", + "whiting", + "moose", + # OTHER ODDITIES + "graffiti", + "djinn", + "samuri", + "offspring", + "pence", + "quid", + "hertz", +] + pl_sb_uninflected_s_complete +# SOME WORDS ENDING IN ...s (OFTEN PAIRS TAKEN AS A WHOLE) + +pl_sb_uninflected_caps = [ + # ALL NATIONALS ENDING IN -ese + "Portuguese", + "Amoyese", + "Borghese", + "Congoese", + "Faroese", + "Foochowese", + "Genevese", + "Genoese", + "Gilbertese", + "Hottentotese", + "Kiplingese", + "Kongoese", + "Lucchese", + "Maltese", + "Nankingese", + "Niasese", + "Pekingese", + "Piedmontese", + "Pistoiese", + "Sarawakese", + "Shavese", + "Vermontese", + "Wenchowese", + "Yengeese", +] + + +pl_sb_uninflected_endings = [ + # UNCOUNTABLE NOUNS + "butter", + "cash", + "furniture", + "information", + # SOME FISH AND HERD ANIMALS + "fish", + "deer", + "sheep", + # ALL NATIONALS ENDING IN -ese + "nese", + "rese", + "lese", + "mese", + # DISEASES + "pox", + # OTHER ODDITIES + "craft", +] + pl_sb_uninflected_s_endings +# SOME WORDS ENDING IN ...s (OFTEN PAIRS TAKEN AS A WHOLE) + + +pl_sb_uninflected_bysize = bysize(pl_sb_uninflected_endings) + + +# SINGULAR WORDS ENDING IN ...s (ALL INFLECT WITH ...es) + +pl_sb_singular_s_complete = [ + "acropolis", + "aegis", + "alias", + "asbestos", + "bathos", + "bias", + "bronchitis", + "bursitis", + "caddis", + "cannabis", + "canvas", + "chaos", + "cosmos", + "dais", + "digitalis", + "epidermis", + "ethos", + "eyas", + "gas", + "glottis", + "hubris", + "ibis", + "lens", + "mantis", + "marquis", + "metropolis", + "pathos", + "pelvis", + "polis", + "rhinoceros", + "sassafras", + "trellis", +] + pl_sb_C_is_ides_complete + + +pl_sb_singular_s_endings = ["ss", "us"] + pl_sb_C_is_ides_endings + +pl_sb_singular_s_bysize = bysize(pl_sb_singular_s_endings) + +si_sb_singular_s_complete = [f"{w}es" for w in pl_sb_singular_s_complete] +si_sb_singular_s_endings = [f"{w}es" for w in pl_sb_singular_s_endings] +si_sb_singular_s_bysize = bysize(si_sb_singular_s_endings) + +pl_sb_singular_s_es = ["[A-Z].*es"] + +pl_sb_singular_s = enclose( + "|".join( + pl_sb_singular_s_complete + + [f".*{w}" for w in pl_sb_singular_s_endings] + + pl_sb_singular_s_es + ) +) + + +# PLURALS ENDING IN uses -> use + + +si_sb_ois_oi_case = ("Bolshois", "Hanois") + +si_sb_uses_use_case = ("Betelgeuses", "Duses", "Meuses", "Syracuses", "Toulouses") + +si_sb_uses_use = ( + "abuses", + "applauses", + "blouses", + "carouses", + "causes", + "chartreuses", + "clauses", + 
"contuses", + "douses", + "excuses", + "fuses", + "grouses", + "hypotenuses", + "masseuses", + "menopauses", + "misuses", + "muses", + "overuses", + "pauses", + "peruses", + "profuses", + "recluses", + "reuses", + "ruses", + "souses", + "spouses", + "suffuses", + "transfuses", + "uses", +) + +si_sb_ies_ie_case = ( + "Addies", + "Aggies", + "Allies", + "Amies", + "Angies", + "Annies", + "Annmaries", + "Archies", + "Arties", + "Aussies", + "Barbies", + "Barries", + "Basies", + "Bennies", + "Bernies", + "Berties", + "Bessies", + "Betties", + "Billies", + "Blondies", + "Bobbies", + "Bonnies", + "Bowies", + "Brandies", + "Bries", + "Brownies", + "Callies", + "Carnegies", + "Carries", + "Cassies", + "Charlies", + "Cheries", + "Christies", + "Connies", + "Curies", + "Dannies", + "Debbies", + "Dixies", + "Dollies", + "Donnies", + "Drambuies", + "Eddies", + "Effies", + "Ellies", + "Elsies", + "Eries", + "Ernies", + "Essies", + "Eugenies", + "Fannies", + "Flossies", + "Frankies", + "Freddies", + "Gillespies", + "Goldies", + "Gracies", + "Guthries", + "Hallies", + "Hatties", + "Hetties", + "Hollies", + "Jackies", + "Jamies", + "Janies", + "Jannies", + "Jeanies", + "Jeannies", + "Jennies", + "Jessies", + "Jimmies", + "Jodies", + "Johnies", + "Johnnies", + "Josies", + "Julies", + "Kalgoorlies", + "Kathies", + "Katies", + "Kellies", + "Kewpies", + "Kristies", + "Laramies", + "Lassies", + "Lauries", + "Leslies", + "Lessies", + "Lillies", + "Lizzies", + "Lonnies", + "Lories", + "Lorries", + "Lotties", + "Louies", + "Mackenzies", + "Maggies", + "Maisies", + "Mamies", + "Marcies", + "Margies", + "Maries", + "Marjories", + "Matties", + "McKenzies", + "Melanies", + "Mickies", + "Millies", + "Minnies", + "Mollies", + "Mounties", + "Nannies", + "Natalies", + "Nellies", + "Netties", + "Ollies", + "Ozzies", + "Pearlies", + "Pottawatomies", + "Reggies", + "Richies", + "Rickies", + "Robbies", + "Ronnies", + "Rosalies", + "Rosemaries", + "Rosies", + "Roxies", + "Rushdies", + "Ruthies", + "Sadies", + "Sallies", + "Sammies", + "Scotties", + "Selassies", + "Sherries", + "Sophies", + "Stacies", + "Stefanies", + "Stephanies", + "Stevies", + "Susies", + "Sylvies", + "Tammies", + "Terries", + "Tessies", + "Tommies", + "Tracies", + "Trekkies", + "Valaries", + "Valeries", + "Valkyries", + "Vickies", + "Virgies", + "Willies", + "Winnies", + "Wylies", + "Yorkies", +) + +si_sb_ies_ie = ( + "aeries", + "baggies", + "belies", + "biggies", + "birdies", + "bogies", + "bonnies", + "boogies", + "bookies", + "bourgeoisies", + "brownies", + "budgies", + "caddies", + "calories", + "camaraderies", + "cockamamies", + "collies", + "cookies", + "coolies", + "cooties", + "coteries", + "crappies", + "curies", + "cutesies", + "dogies", + "eyries", + "floozies", + "footsies", + "freebies", + "genies", + "goalies", + "groupies", + "hies", + "jalousies", + "junkies", + "kiddies", + "laddies", + "lassies", + "lies", + "lingeries", + "magpies", + "menageries", + "mommies", + "movies", + "neckties", + "newbies", + "nighties", + "oldies", + "organdies", + "overlies", + "pies", + "pinkies", + "pixies", + "potpies", + "prairies", + "quickies", + "reveries", + "rookies", + "rotisseries", + "softies", + "sorties", + "species", + "stymies", + "sweeties", + "ties", + "underlies", + "unties", + "veggies", + "vies", + "yuppies", + "zombies", +) + + +si_sb_oes_oe_case = ( + "Chloes", + "Crusoes", + "Defoes", + "Faeroes", + "Ivanhoes", + "Joes", + "McEnroes", + "Moes", + "Monroes", + "Noes", + "Poes", + "Roscoes", + "Tahoes", + "Tippecanoes", + "Zoes", +) + 
+si_sb_oes_oe = ( + "aloes", + "backhoes", + "canoes", + "does", + "floes", + "foes", + "hoes", + "mistletoes", + "oboes", + "pekoes", + "roes", + "sloes", + "throes", + "tiptoes", + "toes", + "woes", +) + +si_sb_z_zes = ("quartzes", "topazes") + +si_sb_zzes_zz = ("buzzes", "fizzes", "frizzes", "razzes") + +si_sb_ches_che_case = ( + "Andromaches", + "Apaches", + "Blanches", + "Comanches", + "Nietzsches", + "Porsches", + "Roches", +) + +si_sb_ches_che = ( + "aches", + "avalanches", + "backaches", + "bellyaches", + "caches", + "cloches", + "creches", + "douches", + "earaches", + "fiches", + "headaches", + "heartaches", + "microfiches", + "niches", + "pastiches", + "psyches", + "quiches", + "stomachaches", + "toothaches", + "tranches", +) + +si_sb_xes_xe = ("annexes", "axes", "deluxes", "pickaxes") + +si_sb_sses_sse_case = ("Hesses", "Jesses", "Larousses", "Matisses") +si_sb_sses_sse = ( + "bouillabaisses", + "crevasses", + "demitasses", + "impasses", + "mousses", + "posses", +) + +si_sb_ves_ve_case = ( + # *[nwl]ives -> [nwl]live + "Clives", + "Palmolives", +) +si_sb_ves_ve = ( + # *[^d]eaves -> eave + "interweaves", + "weaves", + # *[nwl]ives -> [nwl]live + "olives", + # *[eoa]lves -> [eoa]lve + "bivalves", + "dissolves", + "resolves", + "salves", + "twelves", + "valves", +) + + +plverb_special_s = enclose( + "|".join( + [pl_sb_singular_s] + + pl_sb_uninflected_s + + list(pl_sb_irregular_s) + + ["(.*[csx])is", "(.*)ceps", "[A-Z].*s"] + ) +) + +_pl_sb_postfix_adj_defn = ( + ("general", enclose(r"(?!major|lieutenant|brigadier|adjutant|.*star)\S+")), + ("martial", enclose("court")), + ("force", enclose("pound")), +) + +pl_sb_postfix_adj: Iterable[str] = ( + enclose(val + f"(?=(?:-|\\s+){key})") for key, val in _pl_sb_postfix_adj_defn +) + +pl_sb_postfix_adj_stems = f"({'|'.join(pl_sb_postfix_adj)})(.*)" + + +# PLURAL WORDS ENDING IS es GO TO SINGULAR is + +si_sb_es_is = ( + "amanuenses", + "amniocenteses", + "analyses", + "antitheses", + "apotheoses", + "arterioscleroses", + "atheroscleroses", + "axes", + # 'bases', # bases -> basis + "catalyses", + "catharses", + "chasses", + "cirrhoses", + "cocces", + "crises", + "diagnoses", + "dialyses", + "diereses", + "electrolyses", + "emphases", + "exegeses", + "geneses", + "halitoses", + "hydrolyses", + "hypnoses", + "hypotheses", + "hystereses", + "metamorphoses", + "metastases", + "misdiagnoses", + "mitoses", + "mononucleoses", + "narcoses", + "necroses", + "nemeses", + "neuroses", + "oases", + "osmoses", + "osteoporoses", + "paralyses", + "parentheses", + "parthenogeneses", + "periphrases", + "photosyntheses", + "probosces", + "prognoses", + "prophylaxes", + "prostheses", + "preces", + "psoriases", + "psychoanalyses", + "psychokineses", + "psychoses", + "scleroses", + "scolioses", + "sepses", + "silicoses", + "symbioses", + "synopses", + "syntheses", + "taxes", + "telekineses", + "theses", + "thromboses", + "tuberculoses", + "urinalyses", +) + +pl_prep_list = """ + about above across after among around at athwart before behind + below beneath beside besides between betwixt beyond but by + during except for from in into near of off on onto out over + since till to under until unto upon with""".split() + +pl_prep_list_da = pl_prep_list + ["de", "du", "da"] + +pl_prep_bysize = bysize(pl_prep_list_da) + +pl_prep = enclose("|".join(pl_prep_list_da)) + +pl_sb_prep_dual_compound = fr"(.*?)((?:-|\s+)(?:{pl_prep})(?:-|\s+))a(?:-|\s+)(.*)" + + +singular_pronoun_genders = { + "neuter", + "feminine", + "masculine", + "gender-neutral", + "feminine or 
masculine", + "masculine or feminine", +} + +pl_pron_nom = { + # NOMINATIVE REFLEXIVE + "i": "we", + "myself": "ourselves", + "you": "you", + "yourself": "yourselves", + "she": "they", + "herself": "themselves", + "he": "they", + "himself": "themselves", + "it": "they", + "itself": "themselves", + "they": "they", + "themself": "themselves", + # POSSESSIVE + "mine": "ours", + "yours": "yours", + "hers": "theirs", + "his": "theirs", + "its": "theirs", + "theirs": "theirs", +} + +si_pron: Dict[str, Dict[str, Union[str, Dict[str, str]]]] = { + "nom": {v: k for (k, v) in pl_pron_nom.items()} +} +si_pron["nom"]["we"] = "I" + + +pl_pron_acc = { + # ACCUSATIVE REFLEXIVE + "me": "us", + "myself": "ourselves", + "you": "you", + "yourself": "yourselves", + "her": "them", + "herself": "themselves", + "him": "them", + "himself": "themselves", + "it": "them", + "itself": "themselves", + "them": "them", + "themself": "themselves", +} + +pl_pron_acc_keys = enclose("|".join(pl_pron_acc)) +pl_pron_acc_keys_bysize = bysize(pl_pron_acc) + +si_pron["acc"] = {v: k for (k, v) in pl_pron_acc.items()} + +for _thecase, _plur, _gend, _sing in ( + ("nom", "they", "neuter", "it"), + ("nom", "they", "feminine", "she"), + ("nom", "they", "masculine", "he"), + ("nom", "they", "gender-neutral", "they"), + ("nom", "they", "feminine or masculine", "she or he"), + ("nom", "they", "masculine or feminine", "he or she"), + ("nom", "themselves", "neuter", "itself"), + ("nom", "themselves", "feminine", "herself"), + ("nom", "themselves", "masculine", "himself"), + ("nom", "themselves", "gender-neutral", "themself"), + ("nom", "themselves", "feminine or masculine", "herself or himself"), + ("nom", "themselves", "masculine or feminine", "himself or herself"), + ("nom", "theirs", "neuter", "its"), + ("nom", "theirs", "feminine", "hers"), + ("nom", "theirs", "masculine", "his"), + ("nom", "theirs", "gender-neutral", "theirs"), + ("nom", "theirs", "feminine or masculine", "hers or his"), + ("nom", "theirs", "masculine or feminine", "his or hers"), + ("acc", "them", "neuter", "it"), + ("acc", "them", "feminine", "her"), + ("acc", "them", "masculine", "him"), + ("acc", "them", "gender-neutral", "them"), + ("acc", "them", "feminine or masculine", "her or him"), + ("acc", "them", "masculine or feminine", "him or her"), + ("acc", "themselves", "neuter", "itself"), + ("acc", "themselves", "feminine", "herself"), + ("acc", "themselves", "masculine", "himself"), + ("acc", "themselves", "gender-neutral", "themself"), + ("acc", "themselves", "feminine or masculine", "herself or himself"), + ("acc", "themselves", "masculine or feminine", "himself or herself"), +): + try: + si_pron[_thecase][_plur][_gend] = _sing # type: ignore + except TypeError: + si_pron[_thecase][_plur] = {} + si_pron[_thecase][_plur][_gend] = _sing # type: ignore + + +si_pron_acc_keys = enclose("|".join(si_pron["acc"])) +si_pron_acc_keys_bysize = bysize(si_pron["acc"]) + + +def get_si_pron(thecase, word, gender) -> str: + try: + sing = si_pron[thecase][word] + except KeyError: + raise # not a pronoun + try: + return sing[gender] # has several types due to gender + except TypeError: + return cast(str, sing) # answer independent of gender + + +# These dictionaries group verbs by first, second and third person +# conjugations. 
+ +plverb_irregular_pres = { + "am": "are", + "are": "are", + "is": "are", + "was": "were", + "were": "were", + "was": "were", + "have": "have", + "have": "have", + "has": "have", + "do": "do", + "do": "do", + "does": "do", +} + +plverb_ambiguous_pres = { + "act": "act", + "act": "act", + "acts": "act", + "blame": "blame", + "blame": "blame", + "blames": "blame", + "can": "can", + "can": "can", + "can": "can", + "must": "must", + "must": "must", + "must": "must", + "fly": "fly", + "fly": "fly", + "flies": "fly", + "copy": "copy", + "copy": "copy", + "copies": "copy", + "drink": "drink", + "drink": "drink", + "drinks": "drink", + "fight": "fight", + "fight": "fight", + "fights": "fight", + "fire": "fire", + "fire": "fire", + "fires": "fire", + "like": "like", + "like": "like", + "likes": "like", + "look": "look", + "look": "look", + "looks": "look", + "make": "make", + "make": "make", + "makes": "make", + "reach": "reach", + "reach": "reach", + "reaches": "reach", + "run": "run", + "run": "run", + "runs": "run", + "sink": "sink", + "sink": "sink", + "sinks": "sink", + "sleep": "sleep", + "sleep": "sleep", + "sleeps": "sleep", + "view": "view", + "view": "view", + "views": "view", +} + +plverb_ambiguous_pres_keys = re.compile( + fr"^({enclose('|'.join(plverb_ambiguous_pres))})((\s.*)?)$", re.IGNORECASE +) + + +plverb_irregular_non_pres = ( + "did", + "had", + "ate", + "made", + "put", + "spent", + "fought", + "sank", + "gave", + "sought", + "shall", + "could", + "ought", + "should", +) + +plverb_ambiguous_non_pres = re.compile( + r"^((?:thought|saw|bent|will|might|cut))((\s.*)?)$", re.IGNORECASE +) + +# "..oes" -> "..oe" (the rest are "..oes" -> "o") + +pl_v_oes_oe = ("canoes", "floes", "oboes", "roes", "throes", "woes") +pl_v_oes_oe_endings_size4 = ("hoes", "toes") +pl_v_oes_oe_endings_size5 = ("shoes",) + + +pl_count_zero = ("0", "no", "zero", "nil") + + +pl_count_one = ("1", "a", "an", "one", "each", "every", "this", "that") + +pl_adj_special = {"a": "some", "an": "some", "this": "these", "that": "those"} + +pl_adj_special_keys = re.compile( + fr"^({enclose('|'.join(pl_adj_special))})$", re.IGNORECASE +) + +pl_adj_poss = { + "my": "our", + "your": "your", + "its": "their", + "her": "their", + "his": "their", + "their": "their", +} + +pl_adj_poss_keys = re.compile(fr"^({enclose('|'.join(pl_adj_poss))})$", re.IGNORECASE) + + +# 2. INDEFINITE ARTICLES + +# THIS PATTERN MATCHES STRINGS OF CAPITALS STARTING WITH A "VOWEL-SOUND" +# CONSONANT FOLLOWED BY ANOTHER CONSONANT, AND WHICH ARE NOT LIKELY +# TO BE REAL WORDS (OH, ALL RIGHT THEN, IT'S JUST MAGIC!) + +A_abbrev = re.compile( + r""" +(?! FJO | [HLMNS]Y. | RY[EO] | SQU + | ( F[LR]? | [HL] | MN? | N | RH? | S[CHKLMNPTVW]? | X(YL)?) [AEIOU]) +[FHLMNRSX][A-Z] +""", + re.VERBOSE, +) + +# THIS PATTERN CODES THE BEGINNINGS OF ALL ENGLISH WORDS BEGINING WITH A +# 'y' FOLLOWED BY A CONSONANT. ANY OTHER Y-CONSONANT PREFIX THEREFORE +# IMPLIES AN ABBREVIATION. 
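[Editor's sketch of how the comment above plays out for the A_y_cons pattern defined just below; the example words are my own. Per that comment, a match means the y+consonant string is a recognised English word, while a non-match is treated as an abbreviation when choosing between "a" and "an".]

    import re
    A_y_cons = re.compile(r"^(y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt))", re.IGNORECASE)
    assert A_y_cons.match("yttrium")      # known English word beginning y + consonant
    assert not A_y_cons.match("ybf")      # unknown prefix -> treated as an abbreviation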
+ +A_y_cons = re.compile(r"^(y(b[lor]|cl[ea]|fere|gg|p[ios]|rou|tt))", re.IGNORECASE) + +# EXCEPTIONS TO EXCEPTIONS + +A_explicit_a = re.compile(r"^((?:unabomber|unanimous|US))", re.IGNORECASE) + +A_explicit_an = re.compile( + r"^((?:euler|hour(?!i)|heir|honest|hono[ur]|mpeg))", re.IGNORECASE +) + +A_ordinal_an = re.compile(r"^([aefhilmnorsx]-?th)", re.IGNORECASE) + +A_ordinal_a = re.compile(r"^([bcdgjkpqtuvwyz]-?th)", re.IGNORECASE) + + +# NUMERICAL INFLECTIONS + +nth = { + 0: "th", + 1: "st", + 2: "nd", + 3: "rd", + 4: "th", + 5: "th", + 6: "th", + 7: "th", + 8: "th", + 9: "th", + 11: "th", + 12: "th", + 13: "th", +} +nth_suff = set(nth.values()) + +ordinal = dict( + ty="tieth", + one="first", + two="second", + three="third", + five="fifth", + eight="eighth", + nine="ninth", + twelve="twelfth", +) + +ordinal_suff = re.compile(fr"({'|'.join(ordinal)})\Z") + + +# NUMBERS + +unit = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"] +teen = [ + "ten", + "eleven", + "twelve", + "thirteen", + "fourteen", + "fifteen", + "sixteen", + "seventeen", + "eighteen", + "nineteen", +] +ten = [ + "", + "", + "twenty", + "thirty", + "forty", + "fifty", + "sixty", + "seventy", + "eighty", + "ninety", +] +mill = [ + " ", + " thousand", + " million", + " billion", + " trillion", + " quadrillion", + " quintillion", + " sextillion", + " septillion", + " octillion", + " nonillion", + " decillion", +] + + +# SUPPORT CLASSICAL PLURALIZATIONS + +def_classical = dict( + all=False, zero=False, herd=False, names=True, persons=False, ancient=False +) + +all_classical = {k: True for k in def_classical} +no_classical = {k: False for k in def_classical} + + +# Maps strings to built-in constant types +string_to_constant = {"True": True, "False": False, "None": None} + + +# Pre-compiled regular expression objects +DOLLAR_DIGITS = re.compile(r"\$(\d+)") +FUNCTION_CALL = re.compile(r"((\w+)\([^)]*\)*)", re.IGNORECASE) +PARTITION_WORD = re.compile(r"\A(\s*)(.+?)(\s*)\Z") +PL_SB_POSTFIX_ADJ_STEMS_RE = re.compile( + fr"^(?:{pl_sb_postfix_adj_stems})$", re.IGNORECASE +) +PL_SB_PREP_DUAL_COMPOUND_RE = re.compile( + fr"^(?:{pl_sb_prep_dual_compound})$", re.IGNORECASE +) +DENOMINATOR = re.compile(r"(?P.+)( (per|a) .+)") +PLVERB_SPECIAL_S_RE = re.compile(fr"^({plverb_special_s})$") +WHITESPACE = re.compile(r"\s") +ENDS_WITH_S = re.compile(r"^(.*[^s])s$", re.IGNORECASE) +ENDS_WITH_APOSTROPHE_S = re.compile(r"^(.*)'s?$") +INDEFINITE_ARTICLE_TEST = re.compile(r"\A(\s*)(?:an?\s+)?(.+?)(\s*)\Z", re.IGNORECASE) +SPECIAL_AN = re.compile(r"^[aefhilmnorsx]$", re.IGNORECASE) +SPECIAL_A = re.compile(r"^[bcdgjkpqtuvwyz]$", re.IGNORECASE) +SPECIAL_ABBREV_AN = re.compile(r"^[aefhilmnorsx][.-]", re.IGNORECASE) +SPECIAL_ABBREV_A = re.compile(r"^[a-z][.-]", re.IGNORECASE) +CONSONANTS = re.compile(r"^[^aeiouy]", re.IGNORECASE) +ARTICLE_SPECIAL_EU = re.compile(r"^e[uw]", re.IGNORECASE) +ARTICLE_SPECIAL_ONCE = re.compile(r"^onc?e\b", re.IGNORECASE) +ARTICLE_SPECIAL_ONETIME = re.compile(r"^onetime\b", re.IGNORECASE) +ARTICLE_SPECIAL_UNIT = re.compile(r"^uni([^nmd]|mo)", re.IGNORECASE) +ARTICLE_SPECIAL_UBA = re.compile(r"^u[bcfghjkqrst][aeiou]", re.IGNORECASE) +ARTICLE_SPECIAL_UKR = re.compile(r"^ukr", re.IGNORECASE) +SPECIAL_CAPITALS = re.compile(r"^U[NK][AIEO]?") +VOWELS = re.compile(r"^[aeiou]", re.IGNORECASE) + +DIGIT_GROUP = re.compile(r"(\d)") +TWO_DIGITS = re.compile(r"(\d)(\d)") +THREE_DIGITS = re.compile(r"(\d)(\d)(\d)") +THREE_DIGITS_WORD = re.compile(r"(\d)(\d)(\d)(?=\D*\Z)") +TWO_DIGITS_WORD = 
re.compile(r"(\d)(\d)(?=\D*\Z)") +ONE_DIGIT_WORD = re.compile(r"(\d)(?=\D*\Z)") + +FOUR_DIGIT_COMMA = re.compile(r"(\d)(\d{3}(?:,|\Z))") +NON_DIGIT = re.compile(r"\D") +WHITESPACES_COMMA = re.compile(r"\s+,") +COMMA_WORD = re.compile(r", (\S+)\s+\Z") +WHITESPACES = re.compile(r"\s+") + + +PRESENT_PARTICIPLE_REPLACEMENTS = ( + (re.compile(r"ie$"), r"y"), + ( + re.compile(r"ue$"), + r"u", + ), # TODO: isn't ue$ -> u encompassed in the following rule? + (re.compile(r"([auy])e$"), r"\g<1>"), + (re.compile(r"ski$"), r"ski"), + (re.compile(r"[^b]i$"), r""), + (re.compile(r"^(are|were)$"), r"be"), + (re.compile(r"^(had)$"), r"hav"), + (re.compile(r"^(hoe)$"), r"\g<1>"), + (re.compile(r"([^e])e$"), r"\g<1>"), + (re.compile(r"er$"), r"er"), + (re.compile(r"([^aeiou][aeiouy]([bdgmnprst]))$"), r"\g<1>\g<2>"), +) + +DIGIT = re.compile(r"\d") + + +class Words(str): + lowered: str + split_: List[str] + first: str + last: str + + def __init__(self, orig) -> None: + self.lowered = self.lower() + self.split_ = self.split() + self.first = self.split_[0] + self.last = self.split_[-1] + + +Word = Annotated[str, Field(min_length=1)] +Falsish = Any # ideally, falsish would only validate on bool(value) is False + + +class engine: + def __init__(self) -> None: + + self.classical_dict = def_classical.copy() + self.persistent_count: Optional[int] = None + self.mill_count = 0 + self.pl_sb_user_defined: List[str] = [] + self.pl_v_user_defined: List[str] = [] + self.pl_adj_user_defined: List[str] = [] + self.si_sb_user_defined: List[str] = [] + self.A_a_user_defined: List[str] = [] + self.thegender = "neuter" + self.__number_args: Optional[Dict[str, str]] = None + + @property + def _number_args(self): + return cast(Dict[str, str], self.__number_args) + + @_number_args.setter + def _number_args(self, val): + self.__number_args = val + + deprecated_methods = dict( + pl="plural", + plnoun="plural_noun", + plverb="plural_verb", + pladj="plural_adj", + sinoun="single_noun", + prespart="present_participle", + numwords="number_to_words", + plequal="compare", + plnounequal="compare_nouns", + plverbequal="compare_verbs", + pladjequal="compare_adjs", + wordlist="join", + ) + + def __getattr__(self, meth): + if meth in self.deprecated_methods: + print3(f"{meth}() deprecated, use {self.deprecated_methods[meth]}()") + raise DeprecationWarning + raise AttributeError + + def defnoun(self, singular: str, plural: str) -> int: + """ + Set the noun plural of singular to plural. + + """ + self.checkpat(singular) + self.checkpatplural(plural) + self.pl_sb_user_defined.extend((singular, plural)) + self.si_sb_user_defined.extend((plural, singular)) + return 1 + + def defverb(self, s1: str, p1: str, s2: str, p2: str, s3: str, p3: str) -> int: + """ + Set the verb plurals for s1, s2 and s3 to p1, p2 and p3 respectively. + + Where 1, 2 and 3 represent the 1st, 2nd and 3rd person forms of the verb. + + """ + self.checkpat(s1) + self.checkpat(s2) + self.checkpat(s3) + self.checkpatplural(p1) + self.checkpatplural(p2) + self.checkpatplural(p3) + self.pl_v_user_defined.extend((s1, p1, s2, p2, s3, p3)) + return 1 + + def defadj(self, singular: str, plural: str) -> int: + """ + Set the adjective plural of singular to plural. + + """ + self.checkpat(singular) + self.checkpatplural(plural) + self.pl_adj_user_defined.extend((singular, plural)) + return 1 + + def defa(self, pattern: str) -> int: + """ + Define the indefinite article as 'a' for words matching pattern. 
+ + """ + self.checkpat(pattern) + self.A_a_user_defined.extend((pattern, "a")) + return 1 + + def defan(self, pattern: str) -> int: + """ + Define the indefinite article as 'an' for words matching pattern. + + """ + self.checkpat(pattern) + self.A_a_user_defined.extend((pattern, "an")) + return 1 + + def checkpat(self, pattern: Optional[str]) -> None: + """ + check for errors in a regex pattern + """ + if pattern is None: + return + try: + re.match(pattern, "") + except re.error: + print3(f"\nBad user-defined singular pattern:\n\t{pattern}\n") + raise BadUserDefinedPatternError + + def checkpatplural(self, pattern: str) -> None: + """ + check for errors in a regex replace pattern + """ + return + + @validate_arguments + def ud_match(self, word: Word, wordlist: Sequence[Optional[Word]]) -> Optional[str]: + for i in range(len(wordlist) - 2, -2, -2): # backwards through even elements + mo = re.search(fr"^{wordlist[i]}$", word, re.IGNORECASE) + if mo: + if wordlist[i + 1] is None: + return None + pl = DOLLAR_DIGITS.sub( + r"\\1", cast(Word, wordlist[i + 1]) + ) # change $n to \n for expand + return mo.expand(pl) + return None + + def classical(self, **kwargs) -> None: + """ + turn classical mode on and off for various categories + + turn on all classical modes: + classical() + classical(all=True) + + turn on or off specific claassical modes: + e.g. + classical(herd=True) + classical(names=False) + + By default all classical modes are off except names. + + unknown value in args or key in kwargs raises + exception: UnknownClasicalModeError + + """ + if not kwargs: + self.classical_dict = all_classical.copy() + return + if "all" in kwargs: + if kwargs["all"]: + self.classical_dict = all_classical.copy() + else: + self.classical_dict = no_classical.copy() + + for k, v in kwargs.items(): + if k in def_classical: + self.classical_dict[k] = v + else: + raise UnknownClassicalModeError + + def num( + self, count: Optional[int] = None, show: Optional[int] = None + ) -> str: # (;$count,$show) + """ + Set the number to be used in other method calls. + + Returns count. + + Set show to False to return '' instead. + + """ + if count is not None: + try: + self.persistent_count = int(count) + except ValueError: + raise BadNumValueError + if (show is None) or show: + return str(count) + else: + self.persistent_count = None + return "" + + def gender(self, gender: str) -> None: + """ + set the gender for the singular of plural pronouns + + can be one of: + 'neuter' ('they' -> 'it') + 'feminine' ('they' -> 'she') + 'masculine' ('they' -> 'he') + 'gender-neutral' ('they' -> 'they') + 'feminine or masculine' ('they' -> 'she or he') + 'masculine or feminine' ('they' -> 'he or she') + """ + if gender in singular_pronoun_genders: + self.thegender = gender + else: + raise BadGenderError + + def _get_value_from_ast(self, obj): + """ + Return the value of the ast object. + """ + if isinstance(obj, ast.Num): + return obj.n + elif isinstance(obj, ast.Str): + return obj.s + elif isinstance(obj, ast.List): + return [self._get_value_from_ast(e) for e in obj.elts] + elif isinstance(obj, ast.Tuple): + return tuple([self._get_value_from_ast(e) for e in obj.elts]) + + # None, True and False are NameConstants in Py3.4 and above. + elif isinstance(obj, ast.NameConstant): + return obj.value + + # Probably passed a variable name. 
+ # Or passed a single word without wrapping it in quotes as an argument + # ex: p.inflect("I plural(see)") instead of p.inflect("I plural('see')") + raise NameError(f"name '{obj.id}' is not defined") + + def _string_to_substitute( + self, mo: Match, methods_dict: Dict[str, Callable] + ) -> str: + """ + Return the string to be substituted for the match. + """ + matched_text, f_name = mo.groups() + # matched_text is the complete match string. e.g. plural_noun(cat) + # f_name is the function name. e.g. plural_noun + + # Return matched_text if function name is not in methods_dict + if f_name not in methods_dict: + return matched_text + + # Parse the matched text + a_tree = ast.parse(matched_text) + + # get the args and kwargs from ast objects + args_list = [ + self._get_value_from_ast(a) + for a in a_tree.body[0].value.args # type: ignore[attr-defined] + ] + kwargs_list = { + kw.arg: self._get_value_from_ast(kw.value) + for kw in a_tree.body[0].value.keywords # type: ignore[attr-defined] + } + + # Call the corresponding function + return methods_dict[f_name](*args_list, **kwargs_list) + + # 0. PERFORM GENERAL INFLECTIONS IN A STRING + + @validate_arguments + def inflect(self, text: Word) -> str: + """ + Perform inflections in a string. + + e.g. inflect('The plural of cat is plural(cat)') returns + 'The plural of cat is cats' + + can use plural, plural_noun, plural_verb, plural_adj, + singular_noun, a, an, no, ordinal, number_to_words, + and prespart + + """ + save_persistent_count = self.persistent_count + + # Dictionary of allowed methods + methods_dict: Dict[str, Callable] = { + "plural": self.plural, + "plural_adj": self.plural_adj, + "plural_noun": self.plural_noun, + "plural_verb": self.plural_verb, + "singular_noun": self.singular_noun, + "a": self.a, + "an": self.a, + "no": self.no, + "ordinal": self.ordinal, + "number_to_words": self.number_to_words, + "present_participle": self.present_participle, + "num": self.num, + } + + # Regular expression to find Python's function call syntax + output = FUNCTION_CALL.sub( + lambda mo: self._string_to_substitute(mo, methods_dict), text + ) + self.persistent_count = save_persistent_count + return output + + # ## PLURAL SUBROUTINES + + def postprocess(self, orig: str, inflected) -> str: + inflected = str(inflected) + if "|" in inflected: + word_options = inflected.split("|") + # When two parts of a noun need to be pluralized + if len(word_options[0].split(" ")) == len(word_options[1].split(" ")): + result = inflected.split("|")[self.classical_dict["all"]].split(" ") + # When only the last part of the noun needs to be pluralized + else: + result = inflected.split(" ") + for index, word in enumerate(result): + if "|" in word: + result[index] = word.split("|")[self.classical_dict["all"]] + else: + result = inflected.split(" ") + + # Try to fix word wise capitalization + for index, word in enumerate(orig.split(" ")): + if word == "I": + # Is this the only word for exceptions like this + # Where the original is fully capitalized + # without 'meaning' capitalization? 
+ # Also this fails to handle a capitalizaion in context + continue + if word.capitalize() == word: + result[index] = result[index].capitalize() + if word == word.upper(): + result[index] = result[index].upper() + return " ".join(result) + + def partition_word(self, text: str) -> Tuple[str, str, str]: + mo = PARTITION_WORD.search(text) + if mo: + return mo.group(1), mo.group(2), mo.group(3) + else: + return "", "", "" + + @validate_arguments + def plural(self, text: Word, count: Optional[Union[str, int, Any]] = None) -> str: + """ + Return the plural of text. + + If count supplied, then return text if count is one of: + 1, a, an, one, each, every, this, that + + otherwise return the plural. + + Whitespace at the start and end is preserved. + + """ + pre, word, post = self.partition_word(text) + if not word: + return text + plural = self.postprocess( + word, + self._pl_special_adjective(word, count) + or self._pl_special_verb(word, count) + or self._plnoun(word, count), + ) + return f"{pre}{plural}{post}" + + @validate_arguments + def plural_noun( + self, text: Word, count: Optional[Union[str, int, Any]] = None + ) -> str: + """ + Return the plural of text, where text is a noun. + + If count supplied, then return text if count is one of: + 1, a, an, one, each, every, this, that + + otherwise return the plural. + + Whitespace at the start and end is preserved. + + """ + pre, word, post = self.partition_word(text) + if not word: + return text + plural = self.postprocess(word, self._plnoun(word, count)) + return f"{pre}{plural}{post}" + + @validate_arguments + def plural_verb( + self, text: Word, count: Optional[Union[str, int, Any]] = None + ) -> str: + """ + Return the plural of text, where text is a verb. + + If count supplied, then return text if count is one of: + 1, a, an, one, each, every, this, that + + otherwise return the plural. + + Whitespace at the start and end is preserved. + + """ + pre, word, post = self.partition_word(text) + if not word: + return text + plural = self.postprocess( + word, + self._pl_special_verb(word, count) or self._pl_general_verb(word, count), + ) + return f"{pre}{plural}{post}" + + @validate_arguments + def plural_adj( + self, text: Word, count: Optional[Union[str, int, Any]] = None + ) -> str: + """ + Return the plural of text, where text is an adjective. + + If count supplied, then return text if count is one of: + 1, a, an, one, each, every, this, that + + otherwise return the plural. + + Whitespace at the start and end is preserved. + + """ + pre, word, post = self.partition_word(text) + if not word: + return text + plural = self.postprocess(word, self._pl_special_adjective(word, count) or word) + return f"{pre}{plural}{post}" + + @validate_arguments + def compare(self, word1: Word, word2: Word) -> Union[str, bool]: + """ + compare word1 and word2 for equality regardless of plurality + + return values: + eq - the strings are equal + p:s - word1 is the plural of word2 + s:p - word2 is the plural of word1 + p:p - word1 and word2 are two different plural forms of the one word + False - otherwise + + >>> compare = engine().compare + >>> compare("egg", "eggs") + 's:p' + >>> compare('egg', 'egg') + 'eq' + + Words should not be empty. + + >>> compare('egg', '') + Traceback (most recent call last): + ... + pydantic.error_wrappers.ValidationError: 1 validation error for Compare + word2 + ensure this value has at least 1 characters... 
+ """ + norms = self.plural_noun, self.plural_verb, self.plural_adj + results = (self._plequal(word1, word2, norm) for norm in norms) + return next(filter(None, results), False) + + @validate_arguments + def compare_nouns(self, word1: Word, word2: Word) -> Union[str, bool]: + """ + compare word1 and word2 for equality regardless of plurality + word1 and word2 are to be treated as nouns + + return values: + eq - the strings are equal + p:s - word1 is the plural of word2 + s:p - word2 is the plural of word1 + p:p - word1 and word2 are two different plural forms of the one word + False - otherwise + + """ + return self._plequal(word1, word2, self.plural_noun) + + @validate_arguments + def compare_verbs(self, word1: Word, word2: Word) -> Union[str, bool]: + """ + compare word1 and word2 for equality regardless of plurality + word1 and word2 are to be treated as verbs + + return values: + eq - the strings are equal + p:s - word1 is the plural of word2 + s:p - word2 is the plural of word1 + p:p - word1 and word2 are two different plural forms of the one word + False - otherwise + + """ + return self._plequal(word1, word2, self.plural_verb) + + @validate_arguments + def compare_adjs(self, word1: Word, word2: Word) -> Union[str, bool]: + """ + compare word1 and word2 for equality regardless of plurality + word1 and word2 are to be treated as adjectives + + return values: + eq - the strings are equal + p:s - word1 is the plural of word2 + s:p - word2 is the plural of word1 + p:p - word1 and word2 are two different plural forms of the one word + False - otherwise + + """ + return self._plequal(word1, word2, self.plural_adj) + + @validate_arguments + def singular_noun( + self, + text: Word, + count: Optional[Union[int, str, Any]] = None, + gender: Optional[str] = None, + ) -> Union[str, bool]: + """ + Return the singular of text, where text is a plural noun. + + If count supplied, then return the singular if count is one of: + 1, a, an, one, each, every, this, that or if count is None + + otherwise return text unchanged. + + Whitespace at the start and end is preserved. + + >>> p = engine() + >>> p.singular_noun('horses') + 'horse' + >>> p.singular_noun('knights') + 'knight' + + Returns False when a singular noun is passed. 
+ + >>> p.singular_noun('horse') + False + >>> p.singular_noun('knight') + False + >>> p.singular_noun('soldier') + False + + """ + pre, word, post = self.partition_word(text) + if not word: + return text + sing = self._sinoun(word, count=count, gender=gender) + if sing is not False: + plural = self.postprocess(word, sing) + return f"{pre}{plural}{post}" + return False + + def _plequal(self, word1: str, word2: str, pl) -> Union[str, bool]: # noqa: C901 + classval = self.classical_dict.copy() + self.classical_dict = all_classical.copy() + if word1 == word2: + return "eq" + if word1 == pl(word2): + return "p:s" + if pl(word1) == word2: + return "s:p" + self.classical_dict = no_classical.copy() + if word1 == pl(word2): + return "p:s" + if pl(word1) == word2: + return "s:p" + self.classical_dict = classval.copy() + + if pl == self.plural or pl == self.plural_noun: + if self._pl_check_plurals_N(word1, word2): + return "p:p" + if self._pl_check_plurals_N(word2, word1): + return "p:p" + if pl == self.plural or pl == self.plural_adj: + if self._pl_check_plurals_adj(word1, word2): + return "p:p" + return False + + def _pl_reg_plurals(self, pair: str, stems: str, end1: str, end2: str) -> bool: + pattern = fr"({stems})({end1}\|\1{end2}|{end2}\|\1{end1})" + return bool(re.search(pattern, pair)) + + def _pl_check_plurals_N(self, word1: str, word2: str) -> bool: + stem_endings = ( + (pl_sb_C_a_ata, "as", "ata"), + (pl_sb_C_is_ides, "is", "ides"), + (pl_sb_C_a_ae, "s", "e"), + (pl_sb_C_en_ina, "ens", "ina"), + (pl_sb_C_um_a, "ums", "a"), + (pl_sb_C_us_i, "uses", "i"), + (pl_sb_C_on_a, "ons", "a"), + (pl_sb_C_o_i_stems, "os", "i"), + (pl_sb_C_ex_ices, "exes", "ices"), + (pl_sb_C_ix_ices, "ixes", "ices"), + (pl_sb_C_i, "s", "i"), + (pl_sb_C_im, "s", "im"), + (".*eau", "s", "x"), + (".*ieu", "s", "x"), + (".*tri", "xes", "ces"), + (".{2,}[yia]n", "xes", "ges"), + ) + + words = map(Words, (word1, word2)) + pair = "|".join(word.last for word in words) + + return ( + pair in pl_sb_irregular_s.values() + or pair in pl_sb_irregular.values() + or pair in pl_sb_irregular_caps.values() + or any( + self._pl_reg_plurals(pair, stems, end1, end2) + for stems, end1, end2 in stem_endings + ) + ) + + def _pl_check_plurals_adj(self, word1: str, word2: str) -> bool: + word1a = word1[: word1.rfind("'")] if word1.endswith(("'s", "'")) else "" + word2a = word2[: word2.rfind("'")] if word2.endswith(("'s", "'")) else "" + + return ( + bool(word1a) + and bool(word2a) + and ( + self._pl_check_plurals_N(word1a, word2a) + or self._pl_check_plurals_N(word2a, word1a) + ) + ) + + def get_count(self, count: Optional[Union[str, int]] = None) -> Union[str, int]: + if count is None and self.persistent_count is not None: + count = self.persistent_count + + if count is not None: + count = ( + 1 + if ( + (str(count) in pl_count_one) + or ( + self.classical_dict["zero"] + and str(count).lower() in pl_count_zero + ) + ) + else 2 + ) + else: + count = "" + return count + + # @profile + def _plnoun( # noqa: C901 + self, word: str, count: Optional[Union[str, int]] = None + ) -> str: + count = self.get_count(count) + + # DEFAULT TO PLURAL + + if count == 1: + return word + + # HANDLE USER-DEFINED NOUNS + + value = self.ud_match(word, self.pl_sb_user_defined) + if value is not None: + return value + + # HANDLE EMPTY WORD, SINGULAR COUNT AND UNINFLECTED PLURALS + + if word == "": + return word + + word = Words(word) + + if word.last.lower() in pl_sb_uninflected_complete: + return word + + if word in pl_sb_uninflected_caps: + return word + + for k, v 
in pl_sb_uninflected_bysize.items(): + if word.lowered[-k:] in v: + return word + + if self.classical_dict["herd"] and word.last.lower() in pl_sb_uninflected_herd: + return word + + # HANDLE COMPOUNDS ("Governor General", "mother-in-law", "aide-de-camp", ETC.) + + mo = PL_SB_POSTFIX_ADJ_STEMS_RE.search(word) + if mo and mo.group(2) != "": + return f"{self._plnoun(mo.group(1), 2)}{mo.group(2)}" + + if " a " in word.lowered or "-a-" in word.lowered: + mo = PL_SB_PREP_DUAL_COMPOUND_RE.search(word) + if mo and mo.group(2) != "" and mo.group(3) != "": + return ( + f"{self._plnoun(mo.group(1), 2)}" + f"{mo.group(2)}" + f"{self._plnoun(mo.group(3))}" + ) + + if len(word.split_) >= 3: + for numword in range(1, len(word.split_) - 1): + if word.split_[numword] in pl_prep_list_da: + return " ".join( + word.split_[: numword - 1] + + [self._plnoun(word.split_[numword - 1], 2)] + + word.split_[numword:] + ) + + # only pluralize denominators in units + mo = DENOMINATOR.search(word.lowered) + if mo: + index = len(mo.group("denominator")) + return f"{self._plnoun(word[:index])}{word[index:]}" + + # handle units given in degrees (only accept if + # there is no more than one word following) + # degree Celsius => degrees Celsius but degree + # fahrenheit hour => degree fahrenheit hours + if len(word.split_) >= 2 and word.split_[-2] == "degree": + return " ".join([self._plnoun(word.first)] + word.split_[1:]) + + with contextlib.suppress(ValueError): + return self._handle_prepositional_phrase( + word.lowered, + functools.partial(self._plnoun, count=2), + '-', + ) + + # HANDLE PRONOUNS + + for k, v in pl_pron_acc_keys_bysize.items(): + if word.lowered[-k:] in v: # ends with accusative pronoun + for pk, pv in pl_prep_bysize.items(): + if word.lowered[:pk] in pv: # starts with a prep + if word.lowered.split() == [ + word.lowered[:pk], + word.lowered[-k:], + ]: + # only whitespace in between + return word.lowered[:-k] + pl_pron_acc[word.lowered[-k:]] + + try: + return pl_pron_nom[word.lowered] + except KeyError: + pass + + try: + return pl_pron_acc[word.lowered] + except KeyError: + pass + + # HANDLE ISOLATED IRREGULAR PLURALS + + if word.last in pl_sb_irregular_caps: + llen = len(word.last) + return f"{word[:-llen]}{pl_sb_irregular_caps[word.last]}" + + lowered_last = word.last.lower() + if lowered_last in pl_sb_irregular: + llen = len(lowered_last) + return f"{word[:-llen]}{pl_sb_irregular[lowered_last]}" + + dash_split = word.lowered.split('-') + if (" ".join(dash_split[-2:])).lower() in pl_sb_irregular_compound: + llen = len( + " ".join(dash_split[-2:]) + ) # TODO: what if 2 spaces between these words? 
+ return (
+ f"{word[:-llen]}"
+ f"{pl_sb_irregular_compound[(' '.join(dash_split[-2:])).lower()]}"
+ )
+
+ if word.lowered[-3:] == "quy":
+ return f"{word[:-1]}ies"
+
+ if word.lowered[-6:] == "person":
+ if self.classical_dict["persons"]:
+ return f"{word}s"
+ else:
+ return f"{word[:-4]}ople"
+
+ # HANDLE FAMILIES OF IRREGULAR PLURALS
+
+ if word.lowered[-3:] == "man":
+ for k, v in pl_sb_U_man_mans_bysize.items():
+ if word.lowered[-k:] in v:
+ return f"{word}s"
+ for k, v in pl_sb_U_man_mans_caps_bysize.items():
+ if word[-k:] in v:
+ return f"{word}s"
+ return f"{word[:-3]}men"
+ if word.lowered[-5:] == "mouse":
+ return f"{word[:-5]}mice"
+ if word.lowered[-5:] == "louse":
+ v = pl_sb_U_louse_lice_bysize.get(len(word))
+ if v and word.lowered in v:
+ return f"{word[:-5]}lice"
+ return f"{word}s"
+ if word.lowered[-5:] == "goose":
+ return f"{word[:-5]}geese"
+ if word.lowered[-5:] == "tooth":
+ return f"{word[:-5]}teeth"
+ if word.lowered[-4:] == "foot":
+ return f"{word[:-4]}feet"
+ if word.lowered[-4:] == "taco":
+ return f"{word[:-4]}tacos"
+
+ if word.lowered == "die":
+ return "dice"
+
+ # HANDLE UNASSIMILATED IMPORTS
+
+ if word.lowered[-4:] == "ceps":
+ return word
+ if word.lowered[-4:] == "zoon":
+ return f"{word[:-2]}a"
+ if word.lowered[-3:] in ("cis", "sis", "xis"):
+ return f"{word[:-2]}es"
+
+ for lastlet, d, numend, post in (
+ ("h", pl_sb_U_ch_chs_bysize, None, "s"),
+ ("x", pl_sb_U_ex_ices_bysize, -2, "ices"),
+ ("x", pl_sb_U_ix_ices_bysize, -2, "ices"),
+ ("m", pl_sb_U_um_a_bysize, -2, "a"),
+ ("s", pl_sb_U_us_i_bysize, -2, "i"),
+ ("n", pl_sb_U_on_a_bysize, -2, "a"),
+ ("a", pl_sb_U_a_ae_bysize, None, "e"),
+ ):
+ if word.lowered[-1] == lastlet: # this test to add speed
+ for k, v in d.items():
+ if word.lowered[-k:] in v:
+ return word[:numend] + post
+
+ # HANDLE INCOMPLETELY ASSIMILATED IMPORTS
+
+ if self.classical_dict["ancient"]:
+ if word.lowered[-4:] == "trix":
+ return f"{word[:-1]}ces"
+ if word.lowered[-3:] in ("eau", "ieu"):
+ return f"{word}x"
+ if word.lowered[-3:] in ("ynx", "inx", "anx") and len(word) > 4:
+ return f"{word[:-1]}ges"
+
+ for lastlet, d, numend, post in (
+ ("n", pl_sb_C_en_ina_bysize, -2, "ina"),
+ ("x", pl_sb_C_ex_ices_bysize, -2, "ices"),
+ ("x", pl_sb_C_ix_ices_bysize, -2, "ices"),
+ ("m", pl_sb_C_um_a_bysize, -2, "a"),
+ ("s", pl_sb_C_us_i_bysize, -2, "i"),
+ ("s", pl_sb_C_us_us_bysize, None, ""),
+ ("a", pl_sb_C_a_ae_bysize, None, "e"),
+ ("a", pl_sb_C_a_ata_bysize, None, "ta"),
+ ("s", pl_sb_C_is_ides_bysize, -1, "des"),
+ ("o", pl_sb_C_o_i_bysize, -1, "i"),
+ ("n", pl_sb_C_on_a_bysize, -2, "a"),
+ ):
+ if word.lowered[-1] == lastlet: # this test to add speed
+ for k, v in d.items():
+ if word.lowered[-k:] in v:
+ return word[:numend] + post
+
+ for d, numend, post in (
+ (pl_sb_C_i_bysize, None, "i"),
+ (pl_sb_C_im_bysize, None, "im"),
+ ):
+ for k, v in d.items():
+ if word.lowered[-k:] in v:
+ return word[:numend] + post
+
+ # HANDLE SINGULAR NOUNS ENDING IN ...s OR OTHER SIBILANTS
+
+ if lowered_last in pl_sb_singular_s_complete:
+ return f"{word}es"
+
+ for k, v in pl_sb_singular_s_bysize.items():
+ if word.lowered[-k:] in v:
+ return f"{word}es"
+
+ if word.lowered[-2:] == "es" and word[0] == word[0].upper():
+ return f"{word}es"
+
+ if word.lowered[-1] == "z":
+ for k, v in pl_sb_z_zes_bysize.items():
+ if word.lowered[-k:] in v:
+ return f"{word}es"
+
+ if word.lowered[-2:-1] != "z":
+ return f"{word}zes"
+
+ if word.lowered[-2:] == "ze":
+ for k, v in pl_sb_ze_zes_bysize.items():
+ if word.lowered[-k:] in v:
+ return 
f"{word}s" + + if word.lowered[-2:] in ("ch", "sh", "zz", "ss") or word.lowered[-1] == "x": + return f"{word}es" + + # HANDLE ...f -> ...ves + + if word.lowered[-3:] in ("elf", "alf", "olf"): + return f"{word[:-1]}ves" + if word.lowered[-3:] == "eaf" and word.lowered[-4:-3] != "d": + return f"{word[:-1]}ves" + if word.lowered[-4:] in ("nife", "life", "wife"): + return f"{word[:-2]}ves" + if word.lowered[-3:] == "arf": + return f"{word[:-1]}ves" + + # HANDLE ...y + + if word.lowered[-1] == "y": + if word.lowered[-2:-1] in "aeiou" or len(word) == 1: + return f"{word}s" + + if self.classical_dict["names"]: + if word.lowered[-1] == "y" and word[0] == word[0].upper(): + return f"{word}s" + + return f"{word[:-1]}ies" + + # HANDLE ...o + + if lowered_last in pl_sb_U_o_os_complete: + return f"{word}s" + + for k, v in pl_sb_U_o_os_bysize.items(): + if word.lowered[-k:] in v: + return f"{word}s" + + if word.lowered[-2:] in ("ao", "eo", "io", "oo", "uo"): + return f"{word}s" + + if word.lowered[-1] == "o": + return f"{word}es" + + # OTHERWISE JUST ADD ...s + + return f"{word}s" + + @classmethod + def _handle_prepositional_phrase(cls, phrase, transform, sep): + """ + Given a word or phrase possibly separated by sep, parse out + the prepositional phrase and apply the transform to the word + preceding the prepositional phrase. + + Raise ValueError if the pivot is not found or if at least two + separators are not found. + + >>> engine._handle_prepositional_phrase("man-of-war", str.upper, '-') + 'MAN-of-war' + >>> engine._handle_prepositional_phrase("man of war", str.upper, ' ') + 'MAN of war' + """ + parts = phrase.split(sep) + if len(parts) < 3: + raise ValueError("Cannot handle words with fewer than two separators") + + pivot = cls._find_pivot(parts, pl_prep_list_da) + + transformed = transform(parts[pivot - 1]) or parts[pivot - 1] + return " ".join( + parts[: pivot - 1] + [sep.join([transformed, parts[pivot], ''])] + ) + " ".join(parts[(pivot + 1) :]) + + @staticmethod + def _find_pivot(words, candidates): + pivots = ( + index for index in range(1, len(words) - 1) if words[index] in candidates + ) + try: + return next(pivots) + except StopIteration: + raise ValueError("No pivot found") + + def _pl_special_verb( # noqa: C901 + self, word: str, count: Optional[Union[str, int]] = None + ) -> Union[str, bool]: + if self.classical_dict["zero"] and str(count).lower() in pl_count_zero: + return False + count = self.get_count(count) + + if count == 1: + return word + + # HANDLE USER-DEFINED VERBS + + value = self.ud_match(word, self.pl_v_user_defined) + if value is not None: + return value + + # HANDLE IRREGULAR PRESENT TENSE (SIMPLE AND COMPOUND) + + try: + words = Words(word) + except IndexError: + return False # word is '' + + if words.first in plverb_irregular_pres: + return f"{plverb_irregular_pres[words.first]}{words[len(words.first) :]}" + + # HANDLE IRREGULAR FUTURE, PRETERITE AND PERFECT TENSES + + if words.first in plverb_irregular_non_pres: + return word + + # HANDLE PRESENT NEGATIONS (SIMPLE AND COMPOUND) + + if words.first.endswith("n't") and words.first[:-3] in plverb_irregular_pres: + return ( + f"{plverb_irregular_pres[words.first[:-3]]}n't" + f"{words[len(words.first) :]}" + ) + + if words.first.endswith("n't"): + return word + + # HANDLE SPECIAL CASES + + mo = PLVERB_SPECIAL_S_RE.search(word) + if mo: + return False + if WHITESPACE.search(word): + return False + + if words.lowered == "quizzes": + return "quiz" + + # HANDLE STANDARD 3RD PERSON (CHOP THE ...(e)s OFF SINGLE WORDS) + + if ( + 
words.lowered[-4:] in ("ches", "shes", "zzes", "sses") + or words.lowered[-3:] == "xes" + ): + return words[:-2] + + if words.lowered[-3:] == "ies" and len(words) > 3: + return words.lowered[:-3] + "y" + + if ( + words.last.lower() in pl_v_oes_oe + or words.lowered[-4:] in pl_v_oes_oe_endings_size4 + or words.lowered[-5:] in pl_v_oes_oe_endings_size5 + ): + return words[:-1] + + if words.lowered.endswith("oes") and len(words) > 3: + return words.lowered[:-2] + + mo = ENDS_WITH_S.search(words) + if mo: + return mo.group(1) + + # OTHERWISE, A REGULAR VERB (HANDLE ELSEWHERE) + + return False + + def _pl_general_verb( + self, word: str, count: Optional[Union[str, int]] = None + ) -> str: + count = self.get_count(count) + + if count == 1: + return word + + # HANDLE AMBIGUOUS PRESENT TENSES (SIMPLE AND COMPOUND) + + mo = plverb_ambiguous_pres_keys.search(word) + if mo: + return f"{plverb_ambiguous_pres[mo.group(1).lower()]}{mo.group(2)}" + + # HANDLE AMBIGUOUS PRETERITE AND PERFECT TENSES + + mo = plverb_ambiguous_non_pres.search(word) + if mo: + return word + + # OTHERWISE, 1st OR 2ND PERSON IS UNINFLECTED + + return word + + def _pl_special_adjective( + self, word: str, count: Optional[Union[str, int]] = None + ) -> Union[str, bool]: + count = self.get_count(count) + + if count == 1: + return word + + # HANDLE USER-DEFINED ADJECTIVES + + value = self.ud_match(word, self.pl_adj_user_defined) + if value is not None: + return value + + # HANDLE KNOWN CASES + + mo = pl_adj_special_keys.search(word) + if mo: + return pl_adj_special[mo.group(1).lower()] + + # HANDLE POSSESSIVES + + mo = pl_adj_poss_keys.search(word) + if mo: + return pl_adj_poss[mo.group(1).lower()] + + mo = ENDS_WITH_APOSTROPHE_S.search(word) + if mo: + pl = self.plural_noun(mo.group(1)) + trailing_s = "" if pl[-1] == "s" else "s" + return f"{pl}'{trailing_s}" + + # OTHERWISE, NO IDEA + + return False + + # @profile + def _sinoun( # noqa: C901 + self, + word: str, + count: Optional[Union[str, int]] = None, + gender: Optional[str] = None, + ) -> Union[str, bool]: + count = self.get_count(count) + + # DEFAULT TO PLURAL + + if count == 2: + return word + + # SET THE GENDER + + try: + if gender is None: + gender = self.thegender + elif gender not in singular_pronoun_genders: + raise BadGenderError + except (TypeError, IndexError): + raise BadGenderError + + # HANDLE USER-DEFINED NOUNS + + value = self.ud_match(word, self.si_sb_user_defined) + if value is not None: + return value + + # HANDLE EMPTY WORD, SINGULAR COUNT AND UNINFLECTED PLURALS + + if word == "": + return word + + if word in si_sb_ois_oi_case: + return word[:-1] + + words = Words(word) + + if words.last.lower() in pl_sb_uninflected_complete: + return word + + if word in pl_sb_uninflected_caps: + return word + + for k, v in pl_sb_uninflected_bysize.items(): + if words.lowered[-k:] in v: + return word + + if self.classical_dict["herd"] and words.last.lower() in pl_sb_uninflected_herd: + return word + + if words.last.lower() in pl_sb_C_us_us: + return word if self.classical_dict["ancient"] else False + + # HANDLE COMPOUNDS ("Governor General", "mother-in-law", "aide-de-camp", ETC.) 
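+ # (e.g. "mothers-in-law" -> "mother-in-law": only the word before
+ # the preposition is re-inflected)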
+ + mo = PL_SB_POSTFIX_ADJ_STEMS_RE.search(word) + if mo and mo.group(2) != "": + return f"{self._sinoun(mo.group(1), 1, gender=gender)}{mo.group(2)}" + + with contextlib.suppress(ValueError): + return self._handle_prepositional_phrase( + words.lowered, + functools.partial(self._sinoun, count=1, gender=gender), + ' ', + ) + + with contextlib.suppress(ValueError): + return self._handle_prepositional_phrase( + words.lowered, + functools.partial(self._sinoun, count=1, gender=gender), + '-', + ) + + # HANDLE PRONOUNS + + for k, v in si_pron_acc_keys_bysize.items(): + if words.lowered[-k:] in v: # ends with accusative pronoun + for pk, pv in pl_prep_bysize.items(): + if words.lowered[:pk] in pv: # starts with a prep + if words.lowered.split() == [ + words.lowered[:pk], + words.lowered[-k:], + ]: + # only whitespace in between + return words.lowered[:-k] + get_si_pron( + "acc", words.lowered[-k:], gender + ) + + try: + return get_si_pron("nom", words.lowered, gender) + except KeyError: + pass + + try: + return get_si_pron("acc", words.lowered, gender) + except KeyError: + pass + + # HANDLE ISOLATED IRREGULAR PLURALS + + if words.last in si_sb_irregular_caps: + llen = len(words.last) + return "{}{}".format(word[:-llen], si_sb_irregular_caps[words.last]) + + if words.last.lower() in si_sb_irregular: + llen = len(words.last.lower()) + return "{}{}".format(word[:-llen], si_sb_irregular[words.last.lower()]) + + dash_split = words.lowered.split("-") + if (" ".join(dash_split[-2:])).lower() in si_sb_irregular_compound: + llen = len( + " ".join(dash_split[-2:]) + ) # TODO: what if 2 spaces between these words? + return "{}{}".format( + word[:-llen], + si_sb_irregular_compound[(" ".join(dash_split[-2:])).lower()], + ) + + if words.lowered[-5:] == "quies": + return word[:-3] + "y" + + if words.lowered[-7:] == "persons": + return word[:-1] + if words.lowered[-6:] == "people": + return word[:-4] + "rson" + + # HANDLE FAMILIES OF IRREGULAR PLURALS + + if words.lowered[-4:] == "mans": + for k, v in si_sb_U_man_mans_bysize.items(): + if words.lowered[-k:] in v: + return word[:-1] + for k, v in si_sb_U_man_mans_caps_bysize.items(): + if word[-k:] in v: + return word[:-1] + if words.lowered[-3:] == "men": + return word[:-3] + "man" + if words.lowered[-4:] == "mice": + return word[:-4] + "mouse" + if words.lowered[-4:] == "lice": + v = si_sb_U_louse_lice_bysize.get(len(word)) + if v and words.lowered in v: + return word[:-4] + "louse" + if words.lowered[-5:] == "geese": + return word[:-5] + "goose" + if words.lowered[-5:] == "teeth": + return word[:-5] + "tooth" + if words.lowered[-4:] == "feet": + return word[:-4] + "foot" + + if words.lowered == "dice": + return "die" + + # HANDLE UNASSIMILATED IMPORTS + + if words.lowered[-4:] == "ceps": + return word + if words.lowered[-3:] == "zoa": + return word[:-1] + "on" + + for lastlet, d, unass_numend, post in ( + ("s", si_sb_U_ch_chs_bysize, -1, ""), + ("s", si_sb_U_ex_ices_bysize, -4, "ex"), + ("s", si_sb_U_ix_ices_bysize, -4, "ix"), + ("a", si_sb_U_um_a_bysize, -1, "um"), + ("i", si_sb_U_us_i_bysize, -1, "us"), + ("a", si_sb_U_on_a_bysize, -1, "on"), + ("e", si_sb_U_a_ae_bysize, -1, ""), + ): + if words.lowered[-1] == lastlet: # this test to add speed + for k, v in d.items(): + if words.lowered[-k:] in v: + return word[:unass_numend] + post + + # HANDLE INCOMPLETELY ASSIMILATED IMPORTS + + if self.classical_dict["ancient"]: + + if words.lowered[-6:] == "trices": + return word[:-3] + "x" + if words.lowered[-4:] in ("eaux", "ieux"): + return word[:-1] + if 
words.lowered[-5:] in ("ynges", "inges", "anges") and len(word) > 6:
+ return word[:-3] + "x"
+
+ for lastlet, d, class_numend, post in (
+ ("a", si_sb_C_en_ina_bysize, -3, "en"),
+ ("s", si_sb_C_ex_ices_bysize, -4, "ex"),
+ ("s", si_sb_C_ix_ices_bysize, -4, "ix"),
+ ("a", si_sb_C_um_a_bysize, -1, "um"),
+ ("i", si_sb_C_us_i_bysize, -1, "us"),
+ ("s", pl_sb_C_us_us_bysize, None, ""),
+ ("e", si_sb_C_a_ae_bysize, -1, ""),
+ ("a", si_sb_C_a_ata_bysize, -2, ""),
+ ("s", si_sb_C_is_ides_bysize, -3, "s"),
+ ("i", si_sb_C_o_i_bysize, -1, "o"),
+ ("a", si_sb_C_on_a_bysize, -1, "on"),
+ ("m", si_sb_C_im_bysize, -2, ""),
+ ("i", si_sb_C_i_bysize, -1, ""),
+ ):
+ if words.lowered[-1] == lastlet: # this test to add speed
+ for k, v in d.items():
+ if words.lowered[-k:] in v:
+ return word[:class_numend] + post
+
+ # HANDLE PLURALS ENDING IN uses -> use
+
+ if (
+ words.lowered[-6:] == "houses"
+ or word in si_sb_uses_use_case
+ or words.last.lower() in si_sb_uses_use
+ ):
+ return word[:-1]
+
+ # HANDLE PLURALS ENDING IN ies -> ie
+
+ if word in si_sb_ies_ie_case or words.last.lower() in si_sb_ies_ie:
+ return word[:-1]
+
+ # HANDLE PLURALS ENDING IN oes -> oe
+
+ if (
+ words.lowered[-5:] == "shoes"
+ or word in si_sb_oes_oe_case
+ or words.last.lower() in si_sb_oes_oe
+ ):
+ return word[:-1]
+
+ # HANDLE SINGULAR NOUNS ENDING IN ...s OR OTHER SIBILANTS
+
+ if word in si_sb_sses_sse_case or words.last.lower() in si_sb_sses_sse:
+ return word[:-1]
+
+ if words.last.lower() in si_sb_singular_s_complete:
+ return word[:-2]
+
+ for k, v in si_sb_singular_s_bysize.items():
+ if words.lowered[-k:] in v:
+ return word[:-2]
+
+ if words.lowered[-4:] == "eses" and word[0] == word[0].upper():
+ return word[:-2]
+
+ if words.last.lower() in si_sb_z_zes:
+ return word[:-2]
+
+ if words.last.lower() in si_sb_zzes_zz:
+ return word[:-2]
+
+ if words.lowered[-4:] == "zzes":
+ return word[:-3]
+
+ if word in si_sb_ches_che_case or words.last.lower() in si_sb_ches_che:
+ return word[:-1]
+
+ if words.lowered[-4:] in ("ches", "shes"):
+ return word[:-2]
+
+ if words.last.lower() in si_sb_xes_xe:
+ return word[:-1]
+
+ if words.lowered[-3:] == "xes":
+ return word[:-2]
+
+ # HANDLE ...f -> ...ves
+
+ if word in si_sb_ves_ve_case or words.last.lower() in si_sb_ves_ve:
+ return word[:-1]
+
+ if words.lowered[-3:] == "ves":
+ if words.lowered[-5:-3] in ("el", "al", "ol"):
+ return word[:-3] + "f"
+ if words.lowered[-5:-3] == "ea" and word[-6:-5] != "d":
+ return word[:-3] + "f"
+ if words.lowered[-5:-3] in ("ni", "li", "wi"):
+ return word[:-3] + "fe"
+ if words.lowered[-5:-3] == "ar":
+ return word[:-3] + "f"
+
+ # HANDLE ...y
+
+ if words.lowered[-2:] == "ys":
+ if len(words.lowered) > 2 and words.lowered[-3] in "aeiou":
+ return word[:-1]
+
+ if self.classical_dict["names"]:
+ if words.lowered[-2:] == "ys" and word[0] == word[0].upper():
+ return word[:-1]
+
+ if words.lowered[-3:] == "ies":
+ return word[:-3] + "y"
+
+ # HANDLE ...o
+
+ if words.lowered[-2:] == "os":
+
+ if words.last.lower() in si_sb_U_o_os_complete:
+ return word[:-1]
+
+ for k, v in si_sb_U_o_os_bysize.items():
+ if words.lowered[-k:] in v:
+ return word[:-1]
+
+ if words.lowered[-3:] in ("aos", "eos", "ios", "oos", "uos"):
+ return word[:-1]
+
+ if words.lowered[-3:] == "oes":
+ return word[:-2]
+
+ # UNASSIMILATED IMPORTS FINAL RULE
+
+ if word in si_sb_es_is:
+ return word[:-2] + "is"
+
+ # OTHERWISE JUST REMOVE ...s
+
+ if words.lowered[-1] == "s":
+ return word[:-1]
+
+ # COULD NOT FIND SINGULAR
+
+ return False
+
+ # ADJECTIVES
+
+ 
@validate_arguments + def a(self, text: Word, count: Optional[Union[int, str, Any]] = 1) -> str: + """ + Return the appropriate indefinite article followed by text. + + The indefinite article is either 'a' or 'an'. + + If count is not one, then return count followed by text + instead of 'a' or 'an'. + + Whitespace at the start and end is preserved. + + """ + mo = INDEFINITE_ARTICLE_TEST.search(text) + if mo: + word = mo.group(2) + if not word: + return text + pre = mo.group(1) + post = mo.group(3) + result = self._indef_article(word, count) + return f"{pre}{result}{post}" + return "" + + an = a + + _indef_article_cases = ( + # HANDLE ORDINAL FORMS + (A_ordinal_a, "a"), + (A_ordinal_an, "an"), + # HANDLE SPECIAL CASES + (A_explicit_an, "an"), + (SPECIAL_AN, "an"), + (SPECIAL_A, "a"), + # HANDLE ABBREVIATIONS + (A_abbrev, "an"), + (SPECIAL_ABBREV_AN, "an"), + (SPECIAL_ABBREV_A, "a"), + # HANDLE CONSONANTS + (CONSONANTS, "a"), + # HANDLE SPECIAL VOWEL-FORMS + (ARTICLE_SPECIAL_EU, "a"), + (ARTICLE_SPECIAL_ONCE, "a"), + (ARTICLE_SPECIAL_ONETIME, "a"), + (ARTICLE_SPECIAL_UNIT, "a"), + (ARTICLE_SPECIAL_UBA, "a"), + (ARTICLE_SPECIAL_UKR, "a"), + (A_explicit_a, "a"), + # HANDLE SPECIAL CAPITALS + (SPECIAL_CAPITALS, "a"), + # HANDLE VOWELS + (VOWELS, "an"), + # HANDLE y... + # (BEFORE CERTAIN CONSONANTS IMPLIES (UNNATURALIZED) "i.." SOUND) + (A_y_cons, "an"), + ) + + def _indef_article(self, word: str, count: Union[int, str, Any]) -> str: + mycount = self.get_count(count) + + if mycount != 1: + return f"{count} {word}" + + # HANDLE USER-DEFINED VARIANTS + + value = self.ud_match(word, self.A_a_user_defined) + if value is not None: + return f"{value} {word}" + + matches = ( + f'{article} {word}' + for regexen, article in self._indef_article_cases + if regexen.search(word) + ) + + # OTHERWISE, GUESS "a" + fallback = f'a {word}' + return next(matches, fallback) + + # 2. TRANSLATE ZERO-QUANTIFIED $word TO "no plural($word)" + + @validate_arguments + def no(self, text: Word, count: Optional[Union[int, str]] = None) -> str: + """ + If count is 0, no, zero or nil, return 'no' followed by the plural + of text. + + If count is one of: + 1, a, an, one, each, every, this, that + return count followed by text. + + Otherwise return count follow by the plural of text. + + In the return value count is always followed by a space. + + Whitespace at the start and end is preserved. + + """ + if count is None and self.persistent_count is not None: + count = self.persistent_count + + if count is None: + count = 0 + mo = PARTITION_WORD.search(text) + if mo: + pre = mo.group(1) + word = mo.group(2) + post = mo.group(3) + else: + pre = "" + word = "" + post = "" + + if str(count).lower() in pl_count_zero: + count = 'no' + return f"{pre}{count} {self.plural(word, count)}{post}" + + # PARTICIPLES + + @validate_arguments + def present_participle(self, word: Word) -> str: + """ + Return the present participle for word. + + word is the 3rd person singular verb. + + """ + plv = self.plural_verb(word, 2) + ans = plv + + for regexen, repl in PRESENT_PARTICIPLE_REPLACEMENTS: + ans, num = regexen.subn(repl, plv) + if num: + return f"{ans}ing" + return f"{ans}ing" + + # NUMERICAL INFLECTIONS + + @validate_arguments + def ordinal(self, num: Union[int, Word]) -> str: # noqa: C901 + """ + Return the ordinal of num. + + num can be an integer or text + + e.g. ordinal(1) returns '1st' + ordinal('one') returns 'first' + + """ + if DIGIT.match(str(num)): + if isinstance(num, (int, float)): + n = int(num) + else: + if "." 
in str(num): + try: + # numbers after decimal, + # so only need last one for ordinal + n = int(num[-1]) + + except ValueError: # ends with '.', so need to use whole string + n = int(num[:-1]) + else: + n = int(num) + try: + post = nth[n % 100] + except KeyError: + post = nth[n % 10] + return f"{num}{post}" + else: + # Mad props to Damian Conway (?) whose ordinal() + # algorithm is type-bendy enough to foil MyPy + str_num: str = num # type: ignore[assignment] + mo = ordinal_suff.search(str_num) + if mo: + post = ordinal[mo.group(1)] + rval = ordinal_suff.sub(post, str_num) + else: + rval = f"{str_num}th" + return rval + + def millfn(self, ind: int = 0) -> str: + if ind > len(mill) - 1: + print3("number out of range") + raise NumOutOfRangeError + return mill[ind] + + def unitfn(self, units: int, mindex: int = 0) -> str: + return f"{unit[units]}{self.millfn(mindex)}" + + def tenfn(self, tens, units, mindex=0) -> str: + if tens != 1: + tens_part = ten[tens] + if tens and units: + hyphen = "-" + else: + hyphen = "" + unit_part = unit[units] + mill_part = self.millfn(mindex) + return f"{tens_part}{hyphen}{unit_part}{mill_part}" + return f"{teen[units]}{mill[mindex]}" + + def hundfn(self, hundreds: int, tens: int, units: int, mindex: int) -> str: + if hundreds: + andword = f" {self._number_args['andword']} " if tens or units else "" + # use unit not unitfn as simpler + return ( + f"{unit[hundreds]} hundred{andword}" + f"{self.tenfn(tens, units)}{self.millfn(mindex)}, " + ) + if tens or units: + return f"{self.tenfn(tens, units)}{self.millfn(mindex)}, " + return "" + + def group1sub(self, mo: Match) -> str: + units = int(mo.group(1)) + if units == 1: + return f" {self._number_args['one']}, " + elif units: + return f"{unit[units]}, " + else: + return f" {self._number_args['zero']}, " + + def group1bsub(self, mo: Match) -> str: + units = int(mo.group(1)) + if units: + return f"{unit[units]}, " + else: + return f" {self._number_args['zero']}, " + + def group2sub(self, mo: Match) -> str: + tens = int(mo.group(1)) + units = int(mo.group(2)) + if tens: + return f"{self.tenfn(tens, units)}, " + if units: + return f" {self._number_args['zero']} {unit[units]}, " + return f" {self._number_args['zero']} {self._number_args['zero']}, " + + def group3sub(self, mo: Match) -> str: + hundreds = int(mo.group(1)) + tens = int(mo.group(2)) + units = int(mo.group(3)) + if hundreds == 1: + hunword = f" {self._number_args['one']}" + elif hundreds: + hunword = str(unit[hundreds]) + else: + hunword = f" {self._number_args['zero']}" + if tens: + tenword = self.tenfn(tens, units) + elif units: + tenword = f" {self._number_args['zero']} {unit[units]}" + else: + tenword = f" {self._number_args['zero']} {self._number_args['zero']}" + return f"{hunword} {tenword}, " + + def hundsub(self, mo: Match) -> str: + ret = self.hundfn( + int(mo.group(1)), int(mo.group(2)), int(mo.group(3)), self.mill_count + ) + self.mill_count += 1 + return ret + + def tensub(self, mo: Match) -> str: + return f"{self.tenfn(int(mo.group(1)), int(mo.group(2)), self.mill_count)}, " + + def unitsub(self, mo: Match) -> str: + return f"{self.unitfn(int(mo.group(1)), self.mill_count)}, " + + def enword(self, num: str, group: int) -> str: + # import pdb + # pdb.set_trace() + + if group == 1: + num = DIGIT_GROUP.sub(self.group1sub, num) + elif group == 2: + num = TWO_DIGITS.sub(self.group2sub, num) + num = DIGIT_GROUP.sub(self.group1bsub, num, 1) + elif group == 3: + num = THREE_DIGITS.sub(self.group3sub, num) + num = TWO_DIGITS.sub(self.group2sub, num, 1) + 
num = DIGIT_GROUP.sub(self.group1sub, num, 1) + elif int(num) == 0: + num = self._number_args["zero"] + elif int(num) == 1: + num = self._number_args["one"] + else: + num = num.lstrip().lstrip("0") + self.mill_count = 0 + # surely there's a better way to do the next bit + mo = THREE_DIGITS_WORD.search(num) + while mo: + num = THREE_DIGITS_WORD.sub(self.hundsub, num, 1) + mo = THREE_DIGITS_WORD.search(num) + num = TWO_DIGITS_WORD.sub(self.tensub, num, 1) + num = ONE_DIGIT_WORD.sub(self.unitsub, num, 1) + return num + + @validate_arguments(config=dict(arbitrary_types_allowed=True)) # noqa: C901 + def number_to_words( # noqa: C901 + self, + num: Union[Number, Word], + wantlist: bool = False, + group: int = 0, + comma: Union[Falsish, str] = ",", + andword: str = "and", + zero: str = "zero", + one: str = "one", + decimal: Union[Falsish, str] = "point", + threshold: Optional[int] = None, + ) -> Union[str, List[str]]: + """ + Return a number in words. + + group = 1, 2 or 3 to group numbers before turning into words + comma: define comma + + andword: + word for 'and'. Can be set to ''. + e.g. "one hundred and one" vs "one hundred one" + + zero: word for '0' + one: word for '1' + decimal: word for decimal point + threshold: numbers above threshold not turned into words + + parameters not remembered from last call. Departure from Perl version. + """ + self._number_args = {"andword": andword, "zero": zero, "one": one} + num = str(num) + + # Handle "stylistic" conversions (up to a given threshold)... + if threshold is not None and float(num) > threshold: + spnum = num.split(".", 1) + while comma: + (spnum[0], n) = FOUR_DIGIT_COMMA.subn(r"\1,\2", spnum[0]) + if n == 0: + break + try: + return f"{spnum[0]}.{spnum[1]}" + except IndexError: + return str(spnum[0]) + + if group < 0 or group > 3: + raise BadChunkingOptionError + nowhite = num.lstrip() + if nowhite[0] == "+": + sign = "plus" + elif nowhite[0] == "-": + sign = "minus" + else: + sign = "" + + if num in nth_suff: + num = zero + + myord = num[-2:] in nth_suff + if myord: + num = num[:-2] + finalpoint = False + if decimal: + if group != 0: + chunks = num.split(".") + else: + chunks = num.split(".", 1) + if chunks[-1] == "": # remove blank string if nothing after decimal + chunks = chunks[:-1] + finalpoint = True # add 'point' to end of output + else: + chunks = [num] + + first: Union[int, str, bool] = 1 + loopstart = 0 + + if chunks[0] == "": + first = 0 + if len(chunks) > 1: + loopstart = 1 + + for i in range(loopstart, len(chunks)): + chunk = chunks[i] + # remove all non numeric \D + chunk = NON_DIGIT.sub("", chunk) + if chunk == "": + chunk = "0" + + if group == 0 and (first == 0 or first == ""): + chunk = self.enword(chunk, 1) + else: + chunk = self.enword(chunk, group) + + if chunk[-2:] == ", ": + chunk = chunk[:-2] + chunk = WHITESPACES_COMMA.sub(",", chunk) + + if group == 0 and first: + chunk = COMMA_WORD.sub(f" {andword} \\1", chunk) + chunk = WHITESPACES.sub(" ", chunk) + # chunk = re.sub(r"(\A\s|\s\Z)", self.blankfn, chunk) + chunk = chunk.strip() + if first: + first = "" + chunks[i] = chunk + + numchunks = [] + if first != 0: + numchunks = chunks[0].split(f"{comma} ") + + if myord and numchunks: + # TODO: can this be just one re as it is in perl? 
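+ # e.g. 'twenty-five' -> 'twenty-fifth'; a final word with no special
+ # ordinal suffix just gets 'th' ('hundred' -> 'hundredth')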
+ mo = ordinal_suff.search(numchunks[-1]) + if mo: + numchunks[-1] = ordinal_suff.sub(ordinal[mo.group(1)], numchunks[-1]) + else: + numchunks[-1] += "th" + + for chunk in chunks[1:]: + numchunks.append(decimal) + numchunks.extend(chunk.split(f"{comma} ")) + + if finalpoint: + numchunks.append(decimal) + + # wantlist: Perl list context. can explicitly specify in Python + if wantlist: + if sign: + numchunks = [sign] + numchunks + return numchunks + elif group: + signout = f"{sign} " if sign else "" + return f"{signout}{', '.join(numchunks)}" + else: + signout = f"{sign} " if sign else "" + num = f"{signout}{numchunks.pop(0)}" + if decimal is None: + first = True + else: + first = not num.endswith(decimal) + for nc in numchunks: + if nc == decimal: + num += f" {nc}" + first = 0 + elif first: + num += f"{comma} {nc}" + else: + num += f" {nc}" + return num + + # Join words with commas and a trailing 'and' (when appropriate)... + + @validate_arguments + def join( + self, + words: Optional[Sequence[Word]], + sep: Optional[str] = None, + sep_spaced: bool = True, + final_sep: Optional[str] = None, + conj: str = "and", + conj_spaced: bool = True, + ) -> str: + """ + Join words into a list. + + e.g. join(['ant', 'bee', 'fly']) returns 'ant, bee, and fly' + + options: + conj: replacement for 'and' + sep: separator. default ',', unless ',' is in the list then ';' + final_sep: final separator. default ',', unless ',' is in the list then ';' + conj_spaced: boolean. Should conj have spaces around it + + """ + if not words: + return "" + if len(words) == 1: + return words[0] + + if conj_spaced: + if conj == "": + conj = " " + else: + conj = f" {conj} " + + if len(words) == 2: + return f"{words[0]}{conj}{words[1]}" + + if sep is None: + if "," in "".join(words): + sep = ";" + else: + sep = "," + if final_sep is None: + final_sep = sep + + final_sep = f"{final_sep}{conj}" + + if sep_spaced: + sep += " " + + return f"{sep.join(words[0:-1])}{final_sep}{words[-1]}" diff --git a/lib/inflect/py.typed b/lib/inflect/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/lib/jaraco/classes/properties.py b/lib/jaraco/classes/properties.py index 4fda4cb6..62f9e200 100644 --- a/lib/jaraco/classes/properties.py +++ b/lib/jaraco/classes/properties.py @@ -143,7 +143,7 @@ class classproperty: return super().__setattr__(key, value) def __init__(self, fget, fset=None): - self.fget = self._fix_function(fget) + self.fget = self._ensure_method(fget) self.fset = fset fset and self.setter(fset) @@ -158,14 +158,13 @@ class classproperty: return self.fset.__get__(None, owner)(value) def setter(self, fset): - self.fset = self._fix_function(fset) + self.fset = self._ensure_method(fset) return self @classmethod - def _fix_function(cls, fn): + def _ensure_method(cls, fn): """ Ensure fn is a classmethod or staticmethod. """ - if not isinstance(fn, (classmethod, staticmethod)): - return classmethod(fn) - return fn + needs_method = not isinstance(fn, (classmethod, staticmethod)) + return classmethod(fn) if needs_method else fn diff --git a/lib/jaraco/collections.py b/lib/jaraco/collections.py index 8323db78..db89b122 100644 --- a/lib/jaraco/collections.py +++ b/lib/jaraco/collections.py @@ -63,7 +63,7 @@ class Projection(collections.abc.Mapping): return len(tuple(iter(self))) -class DictFilter(object): +class DictFilter(collections.abc.Mapping): """ Takes a dict, and simulates a sub-dict based on the keys. @@ -92,15 +92,21 @@ class DictFilter(object): ... 
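+
+ As a Mapping, DictFilter also supports the default-returning
+ accessors:
+
+ >>> filtered.get('e', 'missing')
+ 'missing'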
KeyError: 'e' + >>> 'e' in filtered + False + + Pattern is useful for excluding keys with a prefix. + + >>> filtered = DictFilter(sample, include_pattern=r'(?![ace])') + >>> dict(filtered) + {'b': 2, 'd': 4} + Also note that DictFilter keeps a reference to the original dict, so if you modify the original dict, that could modify the filtered dict. >>> del sample['d'] - >>> del sample['a'] - >>> filtered == {'b': 2, 'c': 3} - True - >>> filtered != {'b': 2, 'c': 3} - False + >>> dict(filtered) + {'b': 2} """ def __init__(self, dict, include_keys=[], include_pattern=None): @@ -120,29 +126,18 @@ class DictFilter(object): @property def include_keys(self): - return self.specified_keys.union(self.pattern_keys) - - def keys(self): - return self.include_keys.intersection(self.dict.keys()) - - def values(self): - return map(self.dict.get, self.keys()) + return self.specified_keys | self.pattern_keys def __getitem__(self, i): if i not in self.include_keys: raise KeyError(i) return self.dict[i] - def items(self): - keys = self.keys() - values = map(self.dict.get, keys) - return zip(keys, values) + def __iter__(self): + return filter(self.include_keys.__contains__, self.dict.keys()) - def __eq__(self, other): - return dict(self) == other - - def __ne__(self, other): - return dict(self) != other + def __len__(self): + return len(list(self)) def dict_map(function, dictionary): @@ -167,7 +162,7 @@ class RangeMap(dict): the sorted list of keys. One may supply keyword parameters to be passed to the sort function used - to sort keys (i.e. cmp [python 2 only], keys, reverse) as sort_params. + to sort keys (i.e. key, reverse) as sort_params. Let's create a map that maps 1-3 -> 'a', 4-6 -> 'b' @@ -220,6 +215,23 @@ class RangeMap(dict): >>> r.get(7, 'not found') 'not found' + + One often wishes to define the ranges by their left-most values, + which requires use of sort params and a key_match_comparator. + + >>> r = RangeMap({1: 'a', 4: 'b'}, + ... sort_params=dict(reverse=True), + ... 
key_match_comparator=operator.ge) + >>> r[1], r[2], r[3], r[4], r[5], r[6] + ('a', 'a', 'a', 'b', 'b', 'b') + + That wasn't nearly as easy as before, so an alternate constructor + is provided: + + >>> r = RangeMap.left({1: 'a', 4: 'b', 7: RangeMap.undefined_value}) + >>> r[1], r[2], r[3], r[4], r[5], r[6] + ('a', 'a', 'a', 'b', 'b', 'b') + """ def __init__(self, source, sort_params={}, key_match_comparator=operator.le): @@ -227,6 +239,12 @@ class RangeMap(dict): self.sort_params = sort_params self.match = key_match_comparator + @classmethod + def left(cls, source): + return cls( + source, sort_params=dict(reverse=True), key_match_comparator=operator.ge + ) + def __getitem__(self, item): sorted_keys = sorted(self.keys(), **self.sort_params) if isinstance(item, RangeMap.Item): @@ -261,7 +279,7 @@ class RangeMap(dict): return (sorted_keys[RangeMap.first_item], sorted_keys[RangeMap.last_item]) # some special values for the RangeMap - undefined_value = type(str('RangeValueUndefined'), (object,), {})() + undefined_value = type(str('RangeValueUndefined'), (), {})() class Item(int): "RangeMap Item" @@ -370,7 +388,7 @@ class FoldedCaseKeyedDict(KeyTransformingDict): True >>> 'HELLO' in d True - >>> print(repr(FoldedCaseKeyedDict({'heLlo': 'world'})).replace("u'", "'")) + >>> print(repr(FoldedCaseKeyedDict({'heLlo': 'world'}))) {'heLlo': 'world'} >>> d = FoldedCaseKeyedDict({'heLlo': 'world'}) >>> print(d['hello']) @@ -433,7 +451,7 @@ class FoldedCaseKeyedDict(KeyTransformingDict): return jaraco.text.FoldedCase(key) -class DictAdapter(object): +class DictAdapter: """ Provide a getitem interface for attributes of an object. @@ -452,7 +470,7 @@ class DictAdapter(object): return getattr(self.object, name) -class ItemsAsAttributes(object): +class ItemsAsAttributes: """ Mix-in class to enable a mapping object to provide items as attributes. @@ -561,7 +579,7 @@ class IdentityOverrideMap(dict): return key -class DictStack(list, collections.abc.Mapping): +class DictStack(list, collections.abc.MutableMapping): """ A stack of dictionaries that behaves as a view on those dictionaries, giving preference to the last. @@ -578,11 +596,12 @@ class DictStack(list, collections.abc.Mapping): >>> stack.push(dict(a=3)) >>> stack['a'] 3 + >>> stack['a'] = 4 >>> set(stack.keys()) == set(['a', 'b', 'c']) True - >>> set(stack.items()) == set([('a', 3), ('b', 2), ('c', 2)]) + >>> set(stack.items()) == set([('a', 4), ('b', 2), ('c', 2)]) True - >>> dict(**stack) == dict(stack) == dict(a=3, c=2, b=2) + >>> dict(**stack) == dict(stack) == dict(a=4, c=2, b=2) True >>> d = stack.pop() >>> stack['a'] @@ -593,6 +612,9 @@ class DictStack(list, collections.abc.Mapping): >>> stack.get('b', None) >>> 'c' in stack True + >>> del stack['c'] + >>> dict(stack) + {'a': 1} """ def __iter__(self): @@ -613,6 +635,18 @@ class DictStack(list, collections.abc.Mapping): def __len__(self): return len(list(iter(self))) + def __setitem__(self, key, item): + last = list.__getitem__(self, -1) + return last.__setitem__(key, item) + + def __delitem__(self, key): + last = list.__getitem__(self, -1) + return last.__delitem__(key) + + # workaround for mypy confusion + def pop(self, *args, **kwargs): + return list.pop(self, *args, **kwargs) + class BijectiveMap(dict): """ @@ -855,7 +889,7 @@ class Enumeration(ItemsAsAttributes, BijectiveMap): return (self[name] for name in self.names) -class Everything(object): +class Everything: """ A collection "containing" every possible thing. 
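A quick sketch of the RangeMap.left constructor added above. It assumes the usual RangeMap lookup behavior (outside this hunk) where a range mapped to undefined_value raises KeyError:

    >>> from jaraco.collections import RangeMap
    >>> r = RangeMap.left({1: 'a', 4: 'b', 7: RangeMap.undefined_value})
    >>> r[6]
    'b'
    >>> r[7]
    Traceback (most recent call last):
    ...
    KeyError: 7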
@@ -896,7 +930,7 @@ class InstrumentedDict(collections.UserDict): # type: ignore # buggy mypy self.data = data -class Least(object): +class Least: """ A value that is always lesser than any other @@ -928,7 +962,7 @@ class Least(object): __gt__ = __ge__ -class Greatest(object): +class Greatest: """ A value that is always greater than any other diff --git a/lib/jaraco/text/__init__.py b/lib/jaraco/text/__init__.py index 5f75519a..e51101c2 100644 --- a/lib/jaraco/text/__init__.py +++ b/lib/jaraco/text/__init__.py @@ -66,7 +66,7 @@ class FoldedCase(str): >>> s in ["Hello World"] True - You may test for set inclusion, but candidate and elements + Allows testing for set inclusion, but candidate and elements must both be folded. >>> FoldedCase("Hello World") in {s} @@ -92,37 +92,40 @@ class FoldedCase(str): >>> FoldedCase('hello') > FoldedCase('Hello') False + + >>> FoldedCase('ß') == FoldedCase('ss') + True """ def __lt__(self, other): - return self.lower() < other.lower() + return self.casefold() < other.casefold() def __gt__(self, other): - return self.lower() > other.lower() + return self.casefold() > other.casefold() def __eq__(self, other): - return self.lower() == other.lower() + return self.casefold() == other.casefold() def __ne__(self, other): - return self.lower() != other.lower() + return self.casefold() != other.casefold() def __hash__(self): - return hash(self.lower()) + return hash(self.casefold()) def __contains__(self, other): - return super().lower().__contains__(other.lower()) + return super().casefold().__contains__(other.casefold()) def in_(self, other): "Does self appear in other?" return self in FoldedCase(other) - # cache lower since it's likely to be called frequently. + # cache casefold since it's likely to be called frequently. @method_cache - def lower(self): - return super().lower() + def casefold(self): + return super().casefold() def index(self, sub): - return self.lower().index(sub.lower()) + return self.casefold().index(sub.casefold()) def split(self, splitter=' ', maxsplit=0): pattern = re.compile(re.escape(splitter), re.I) @@ -277,7 +280,7 @@ class WordSet(tuple): >>> WordSet.parse("myABCClass") ('my', 'ABC', 'Class') - The result is a WordSet, so you can get the form you need. + The result is a WordSet, providing access to other forms. >>> WordSet.parse("myABCClass").underscore_separated() 'my_ABC_Class' @@ -598,3 +601,22 @@ def join_continuation(lines): except StopIteration: return yield item + + +def read_newlines(filename, limit=1024): + r""" + >>> tmp_path = getfixture('tmp_path') + >>> filename = tmp_path / 'out.txt' + >>> _ = filename.write_text('foo\n', newline='') + >>> read_newlines(filename) + '\n' + >>> _ = filename.write_text('foo\r\n', newline='') + >>> read_newlines(filename) + '\r\n' + >>> _ = filename.write_text('foo\r\nbar\nbing\r', newline='') + >>> read_newlines(filename) + ('\r', '\n', '\r\n') + """ + with open(filename) as fp: + fp.read(limit) + return fp.newlines diff --git a/lib/jaraco/text/layouts.py b/lib/jaraco/text/layouts.py new file mode 100644 index 00000000..9636f0f7 --- /dev/null +++ b/lib/jaraco/text/layouts.py @@ -0,0 +1,25 @@ +qwerty = "-=qwertyuiop[]asdfghjkl;'zxcvbnm,./_+QWERTYUIOP{}ASDFGHJKL:\"ZXCVBNM<>?" 
+dvorak = "[]',.pyfgcrl/=aoeuidhtns-;qjkxbmwvz{}\"<>PYFGCRL?+AOEUIDHTNS_:QJKXBMWVZ" + + +to_dvorak = str.maketrans(qwerty, dvorak) +to_qwerty = str.maketrans(dvorak, qwerty) + + +def translate(input, translation): + """ + >>> translate('dvorak', to_dvorak) + 'ekrpat' + >>> translate('qwerty', to_qwerty) + 'x,dokt' + """ + return input.translate(translation) + + +def _translate_stream(stream, translation): + """ + >>> import io + >>> _translate_stream(io.StringIO('foo'), to_dvorak) + urr + """ + print(translate(stream.read(), translation)) diff --git a/lib/jaraco/text/show-newlines.py b/lib/jaraco/text/show-newlines.py new file mode 100644 index 00000000..2ba32062 --- /dev/null +++ b/lib/jaraco/text/show-newlines.py @@ -0,0 +1,33 @@ +import autocommand +import inflect + +from more_itertools import always_iterable + +import jaraco.text + + +def report_newlines(filename): + r""" + Report the newlines in the indicated file. + + >>> tmp_path = getfixture('tmp_path') + >>> filename = tmp_path / 'out.txt' + >>> _ = filename.write_text('foo\nbar\n', newline='') + >>> report_newlines(filename) + newline is '\n' + >>> filename = tmp_path / 'out.txt' + >>> _ = filename.write_text('foo\nbar\r\n', newline='') + >>> report_newlines(filename) + newlines are ('\n', '\r\n') + """ + newlines = jaraco.text.read_newlines(filename) + count = len(tuple(always_iterable(newlines))) + engine = inflect.engine() + print( + engine.plural_noun("newline", count), + engine.plural_verb("is", count), + repr(newlines), + ) + + +autocommand.autocommand(__name__)(report_newlines) diff --git a/lib/jaraco/text/to-dvorak.py b/lib/jaraco/text/to-dvorak.py new file mode 100644 index 00000000..a6d5da80 --- /dev/null +++ b/lib/jaraco/text/to-dvorak.py @@ -0,0 +1,6 @@ +import sys + +from . import layouts + + +__name__ == '__main__' and layouts._translate_stream(sys.stdin, layouts.to_dvorak) diff --git a/lib/jaraco/text/to-qwerty.py b/lib/jaraco/text/to-qwerty.py new file mode 100644 index 00000000..abe27286 --- /dev/null +++ b/lib/jaraco/text/to-qwerty.py @@ -0,0 +1,6 @@ +import sys + +from . 
import layouts + + +__name__ == '__main__' and layouts._translate_stream(sys.stdin, layouts.to_qwerty) diff --git a/lib/more_itertools/__init__.py b/lib/more_itertools/__init__.py index ea38bef1..557bfc20 100644 --- a/lib/more_itertools/__init__.py +++ b/lib/more_itertools/__init__.py @@ -1,4 +1,6 @@ +"""More routines for operating on iterables, beyond itertools""" + from .more import * # noqa from .recipes import * # noqa -__version__ = '8.12.0' +__version__ = '9.0.0' diff --git a/lib/more_itertools/more.py b/lib/more_itertools/more.py index 630af973..7f73d6ed 100644 --- a/lib/more_itertools/more.py +++ b/lib/more_itertools/more.py @@ -2,9 +2,8 @@ import warnings from collections import Counter, defaultdict, deque, abc from collections.abc import Sequence -from concurrent.futures import ThreadPoolExecutor from functools import partial, reduce, wraps -from heapq import merge, heapify, heapreplace, heappop +from heapq import heapify, heapreplace, heappop from itertools import ( chain, compress, @@ -27,12 +26,16 @@ from sys import hexversion, maxsize from time import monotonic from .recipes import ( + _marker, + _zip_equal, + UnequalIterablesError, consume, flatten, pairwise, powerset, take, unique_everseen, + all_equal, ) __all__ = [ @@ -49,9 +52,9 @@ __all__ = [ 'chunked_even', 'circular_shifts', 'collapse', - 'collate', 'combination_index', 'consecutive_groups', + 'constrained_batches', 'consumer', 'count_cycle', 'countable', @@ -67,6 +70,7 @@ __all__ = [ 'first', 'groupby_transform', 'ichunked', + 'iequals', 'ilen', 'interleave', 'interleave_evenly', @@ -77,6 +81,7 @@ __all__ = [ 'iterate', 'last', 'locate', + 'longest_common_prefix', 'lstrip', 'make_decorator', 'map_except', @@ -133,9 +138,6 @@ __all__ = [ ] -_marker = object() - - def chunked(iterable, n, strict=False): """Break *iterable* into lists of length *n*: @@ -410,44 +412,6 @@ class peekable: return self._cache[index] -def collate(*iterables, **kwargs): - """Return a sorted merge of the items from each of several already-sorted - *iterables*. - - >>> list(collate('ACDZ', 'AZ', 'JKL')) - ['A', 'A', 'C', 'D', 'J', 'K', 'L', 'Z', 'Z'] - - Works lazily, keeping only the next value from each iterable in memory. Use - :func:`collate` to, for example, perform a n-way mergesort of items that - don't fit in memory. - - If a *key* function is specified, the iterables will be sorted according - to its result: - - >>> key = lambda s: int(s) # Sort by numeric value, not by string - >>> list(collate(['1', '10'], ['2', '11'], key=key)) - ['1', '2', '10', '11'] - - - If the *iterables* are sorted in descending order, set *reverse* to - ``True``: - - >>> list(collate([5, 3, 1], [4, 2, 0], reverse=True)) - [5, 4, 3, 2, 1, 0] - - If the elements of the passed-in iterables are out of order, you might get - unexpected results. - - On Python 3.5+, this function is an alias for :func:`heapq.merge`. 
- - """ - warnings.warn( - "collate is no longer part of more_itertools, use heapq.merge", - DeprecationWarning, - ) - return merge(*iterables, **kwargs) - - def consumer(func): """Decorator that automatically advances a PEP-342-style "reverse iterator" to its first yield point so you don't have to call ``next()`` on it @@ -873,7 +837,9 @@ def windowed(seq, n, fillvalue=None, step=1): yield tuple(window) size = len(window) - if size < n: + if size == 0: + return + elif size < n: yield tuple(chain(window, repeat(fillvalue, n - size))) elif 0 < i < min(step, n): window += (fillvalue,) * i @@ -1646,45 +1612,6 @@ def stagger(iterable, offsets=(-1, 0, 1), longest=False, fillvalue=None): ) -class UnequalIterablesError(ValueError): - def __init__(self, details=None): - msg = 'Iterables have different lengths' - if details is not None: - msg += (': index 0 has length {}; index {} has length {}').format( - *details - ) - - super().__init__(msg) - - -def _zip_equal_generator(iterables): - for combo in zip_longest(*iterables, fillvalue=_marker): - for val in combo: - if val is _marker: - raise UnequalIterablesError() - yield combo - - -def _zip_equal(*iterables): - # Check whether the iterables are all the same size. - try: - first_size = len(iterables[0]) - for i, it in enumerate(iterables[1:], 1): - size = len(it) - if size != first_size: - break - else: - # If we didn't break out, we can use the built-in zip. - return zip(*iterables) - - # If we did break out, there was a mismatch. - raise UnequalIterablesError(details=(first_size, i, size)) - # If any one of the iterables didn't have a length, start reading - # them until one runs out. - except TypeError: - return _zip_equal_generator(iterables) - - def zip_equal(*iterables): """``zip`` the input *iterables* together, but raise ``UnequalIterablesError`` if they aren't all the same length. @@ -1826,7 +1753,7 @@ def unzip(iterable): of the zipped *iterable*. The ``i``-th iterable contains the ``i``-th element from each element - of the zipped iterable. The first element is used to to determine the + of the zipped iterable. The first element is used to determine the length of the remaining elements. >>> iterable = [('a', 1), ('b', 2), ('c', 3), ('d', 4)] @@ -2376,6 +2303,16 @@ def locate(iterable, pred=bool, window_size=None): return compress(count(), starmap(pred, it)) +def longest_common_prefix(iterables): + """Yield elements of the longest common prefix amongst given *iterables*. + + >>> ''.join(longest_common_prefix(['abcd', 'abc', 'abf'])) + 'ab' + + """ + return (c[0] for c in takewhile(all_equal, zip(*iterables))) + + def lstrip(iterable, pred): """Yield the items from *iterable*, but strip any from the beginning for which *pred* returns ``True``. @@ -2684,7 +2621,7 @@ def difference(iterable, func=sub, *, initial=None): if initial is not None: first = [] - return chain(first, starmap(func, zip(b, a))) + return chain(first, map(func, b, a)) class SequenceView(Sequence): @@ -3327,6 +3264,27 @@ def only(iterable, default=None, too_long=None): return first_value +class _IChunk: + def __init__(self, iterable, n): + self._it = islice(iterable, n) + self._cache = deque() + + def fill_cache(self): + self._cache.extend(self._it) + + def __iter__(self): + return self + + def __next__(self): + try: + return next(self._it) + except StopIteration: + if self._cache: + return self._cache.popleft() + else: + raise + + def ichunked(iterable, n): """Break *iterable* into sub-iterables with *n* elements each. 
:func:`ichunked` is like :func:`chunked`, but it yields iterables
@@ -3348,20 +3306,39 @@
[8, 9, 10, 11]

"""
- source = iter(iterable)
-
+ source = peekable(iter(iterable))
+ ichunk_marker = object()
while True:
# Check to see whether we're at the end of the source iterable
- item = next(source, _marker)
- if item is _marker:
+ item = source.peek(ichunk_marker)
+ if item is ichunk_marker:
return
- # Clone the source and yield an n-length slice
- source, it = tee(chain([item], source))
- yield islice(it, n)
+ chunk = _IChunk(source, n)
+ yield chunk
- # Advance the source iterable
- consume(source, n)
+ # Advance the source iterable and fill previous chunk's cache
+ chunk.fill_cache()
+
+
+def iequals(*iterables):
+ """Return ``True`` if all given *iterables* are equal to each other,
+ which means that they contain the same elements in the same order.
+
+ The function is useful for comparing iterables of different data types
+ or iterables that do not support equality checks.
+
+ >>> iequals("abc", ['a', 'b', 'c'], ('a', 'b', 'c'), iter("abc"))
+ True
+
+ >>> iequals("abc", "acb")
+ False
+
+ Not to be confused with :func:`all_equal`, which checks whether all
+ elements of iterable are equal to each other.
+
+ """
+ return all(map(all_equal, zip_longest(*iterables, fillvalue=object())))


def distinct_combinations(iterable, r):
@@ -3656,7 +3633,10 @@
self._aborted = False
self._future = None
self._wait_seconds = wait_seconds
- self._executor = ThreadPoolExecutor(max_workers=1)
+ # Lazily import concurrent.futures
+ self._executor = __import__(
+ 'concurrent.futures'
+ ).futures.ThreadPoolExecutor(max_workers=1)
self._iterator = self._reader()

def __enter__(self):
@@ -3961,7 +3941,7 @@
n, _ = last(pool, default=(n, None))

- # Python versiosn below 3.8 don't have math.comb
+ # Python versions below 3.8 don't have math.comb
index = 1
for i, j in enumerate(reversed(indexes), start=1):
j = n - j
@@ -4114,7 +4094,7 @@
If the *strict* keyword argument is ``True``, then
``UnequalIterablesError`` will be raised if any of the iterables have
- different lengthss.
+ different lengths.
"""

def is_scalar(obj):
@@ -4315,3 +4295,53 @@
hi, hi_key = y, y_key

return lo, hi
+
+
+def constrained_batches(
+ iterable, max_size, max_count=None, get_len=len, strict=True
+):
+ """Yield batches of items from *iterable* with a combined size limited by
+ *max_size*.
+
+ >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
+ >>> list(constrained_batches(iterable, 10))
+ [(b'12345', b'123'), (b'12345678', b'1', b'1'), (b'12', b'1')]
+
+ If a *max_count* is supplied, the number of items per batch is also
+ limited:
+
+ >>> iterable = [b'12345', b'123', b'12345678', b'1', b'1', b'12', b'1']
+ >>> list(constrained_batches(iterable, 10, max_count = 2))
+ [(b'12345', b'123'), (b'12345678', b'1'), (b'1', b'12'), (b'1',)]
+
+ If a *get_len* function is supplied, use that instead of :func:`len` to
+ determine item size.
+
+ If *strict* is ``True``, raise ``ValueError`` if any single item is bigger
+ than *max_size*. Otherwise, allow single items to exceed *max_size*. 
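+
+ With *strict* set to ``False``, an oversized item simply forms a
+ batch of its own:
+
+ >>> list(constrained_batches([b'1234567890ABC'], 10, strict=False))
+ [(b'1234567890ABC',)]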
+ """ + if max_size <= 0: + raise ValueError('maximum size must be greater than zero') + + batch = [] + batch_size = 0 + batch_count = 0 + for item in iterable: + item_len = get_len(item) + if strict and item_len > max_size: + raise ValueError('item size exceeds maximum size') + + reached_count = batch_count == max_count + reached_size = item_len + batch_size > max_size + if batch_count and (reached_size or reached_count): + yield tuple(batch) + batch.clear() + batch_size = 0 + batch_count = 0 + + batch.append(item) + batch_size += item_len + batch_count += 1 + + if batch: + yield tuple(batch) diff --git a/lib/more_itertools/more.pyi b/lib/more_itertools/more.pyi index fe7d4bdd..1413fae7 100644 --- a/lib/more_itertools/more.pyi +++ b/lib/more_itertools/more.pyi @@ -72,7 +72,6 @@ class peekable(Generic[_T], Iterator[_T]): @overload def __getitem__(self, index: slice) -> List[_T]: ... -def collate(*iterables: Iterable[_T], **kwargs: Any) -> Iterable[_T]: ... def consumer(func: _GenFn) -> _GenFn: ... def ilen(iterable: Iterable[object]) -> int: ... def iterate(func: Callable[[_T], _T], start: _T) -> Iterator[_T]: ... @@ -179,7 +178,7 @@ def padded( iterable: Iterable[_T], *, n: Optional[int] = ..., - next_multiple: bool = ... + next_multiple: bool = ..., ) -> Iterator[Optional[_T]]: ... @overload def padded( @@ -225,7 +224,7 @@ def zip_equal( __iter1: Iterable[_T], __iter2: Iterable[_T], __iter3: Iterable[_T], - *iterables: Iterable[_T] + *iterables: Iterable[_T], ) -> Iterator[Tuple[_T, ...]]: ... @overload def zip_offset( @@ -233,7 +232,7 @@ def zip_offset( *, offsets: _SizedIterable[int], longest: bool = ..., - fillvalue: None = None + fillvalue: None = None, ) -> Iterator[Tuple[Optional[_T1]]]: ... @overload def zip_offset( @@ -242,7 +241,7 @@ def zip_offset( *, offsets: _SizedIterable[int], longest: bool = ..., - fillvalue: None = None + fillvalue: None = None, ) -> Iterator[Tuple[Optional[_T1], Optional[_T2]]]: ... @overload def zip_offset( @@ -252,7 +251,7 @@ def zip_offset( *iterables: Iterable[_T], offsets: _SizedIterable[int], longest: bool = ..., - fillvalue: None = None + fillvalue: None = None, ) -> Iterator[Tuple[Optional[_T], ...]]: ... @overload def zip_offset( @@ -420,7 +419,7 @@ def difference( iterable: Iterable[_T], func: Callable[[_T, _T], _U] = ..., *, - initial: None = ... + initial: None = ..., ) -> Iterator[Union[_T, _U]]: ... @overload def difference( @@ -529,12 +528,12 @@ def distinct_combinations( def filter_except( validator: Callable[[Any], object], iterable: Iterable[_T], - *exceptions: Type[BaseException] + *exceptions: Type[BaseException], ) -> Iterator[_T]: ... def map_except( function: Callable[[Any], _U], iterable: Iterable[_T], - *exceptions: Type[BaseException] + *exceptions: Type[BaseException], ) -> Iterator[_U]: ... def map_if( iterable: Iterable[Any], @@ -610,7 +609,7 @@ def zip_broadcast( scalar_types: Union[ type, Tuple[Union[type, Tuple[Any, ...]], ...], None ] = ..., - strict: bool = ... + strict: bool = ..., ) -> Iterable[Tuple[_T, ...]]: ... def unique_in_window( iterable: Iterable[_T], n: int, key: Optional[Callable[[_T], _U]] = ... @@ -640,7 +639,7 @@ def minmax( iterable_or_value: Iterable[_SupportsLessThanT], *, key: None = None, - default: _U + default: _U, ) -> Union[_U, Tuple[_SupportsLessThanT, _SupportsLessThanT]]: ... 
@overload def minmax( @@ -653,12 +652,23 @@ def minmax( def minmax( iterable_or_value: _SupportsLessThanT, __other: _SupportsLessThanT, - *others: _SupportsLessThanT + *others: _SupportsLessThanT, ) -> Tuple[_SupportsLessThanT, _SupportsLessThanT]: ... @overload def minmax( iterable_or_value: _T, __other: _T, *others: _T, - key: Callable[[_T], _SupportsLessThan] + key: Callable[[_T], _SupportsLessThan], ) -> Tuple[_T, _T]: ... +def longest_common_prefix( + iterables: Iterable[Iterable[_T]], +) -> Iterator[_T]: ... +def iequals(*iterables: Iterable[object]) -> bool: ... +def constrained_batches( + iterable: Iterable[object], + max_size: int, + max_count: Optional[int] = ..., + get_len: Callable[[_T], object] = ..., + strict: bool = ..., +) -> Iterator[Tuple[_T]]: ... diff --git a/lib/more_itertools/recipes.py b/lib/more_itertools/recipes.py index a2596423..85796207 100644 --- a/lib/more_itertools/recipes.py +++ b/lib/more_itertools/recipes.py @@ -7,11 +7,16 @@ Some backward-compatible usability improvements have been made. .. [1] http://docs.python.org/library/itertools.html#recipes """ -import warnings +import math +import operator + from collections import deque +from collections.abc import Sized +from functools import reduce from itertools import ( chain, combinations, + compress, count, cycle, groupby, @@ -21,11 +26,11 @@ from itertools import ( tee, zip_longest, ) -import operator from random import randrange, sample, choice __all__ = [ 'all_equal', + 'batched', 'before_and_after', 'consume', 'convolve', @@ -41,6 +46,7 @@ __all__ = [ 'pad_none', 'pairwise', 'partition', + 'polynomial_from_roots', 'powerset', 'prepend', 'quantify', @@ -50,7 +56,9 @@ __all__ = [ 'random_product', 'repeatfunc', 'roundrobin', + 'sieve', 'sliding_window', + 'subslices', 'tabulate', 'tail', 'take', @@ -59,6 +67,8 @@ __all__ = [ 'unique_justseen', ] +_marker = object() + def take(n, iterable): """Return first *n* items of the iterable as a list. @@ -102,7 +112,14 @@ def tail(n, iterable): ['E', 'F', 'G'] """ - return iter(deque(iterable, maxlen=n)) + # If the given iterable has a length, then we can use islice to get its + # final elements. Note that if the iterable is not actually Iterable, + # either islice or deque will throw a TypeError. This is why we don't + # check if it is Iterable. + if isinstance(iterable, Sized): + yield from islice(iterable, max(0, len(iterable) - n), None) + else: + yield from iter(deque(iterable, maxlen=n)) def consume(iterator, n=None): @@ -284,20 +301,83 @@ else: pairwise.__doc__ = _pairwise.__doc__ -def grouper(iterable, n, fillvalue=None): - """Collect data into fixed-length chunks or blocks. +class UnequalIterablesError(ValueError): + def __init__(self, details=None): + msg = 'Iterables have different lengths' + if details is not None: + msg += (': index 0 has length {}; index {} has length {}').format( + *details + ) - >>> list(grouper('ABCDEFG', 3, 'x')) + super().__init__(msg) + + +def _zip_equal_generator(iterables): + for combo in zip_longest(*iterables, fillvalue=_marker): + for val in combo: + if val is _marker: + raise UnequalIterablesError() + yield combo + + +def _zip_equal(*iterables): + # Check whether the iterables are all the same size. + try: + first_size = len(iterables[0]) + for i, it in enumerate(iterables[1:], 1): + size = len(it) + if size != first_size: + break + else: + # If we didn't break out, we can use the built-in zip. + return zip(*iterables) + + # If we did break out, there was a mismatch. 
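+        # details is (length of iterable 0, index of the mismatched iterable,
+        # its length); UnequalIterablesError formats these into its message.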
+ raise UnequalIterablesError(details=(first_size, i, size)) + # If any one of the iterables didn't have a length, start reading + # them until one runs out. + except TypeError: + return _zip_equal_generator(iterables) + + +def grouper(iterable, n, incomplete='fill', fillvalue=None): + """Group elements from *iterable* into fixed-length groups of length *n*. + + >>> list(grouper('ABCDEF', 3)) + [('A', 'B', 'C'), ('D', 'E', 'F')] + + The keyword arguments *incomplete* and *fillvalue* control what happens for + iterables whose length is not a multiple of *n*. + + When *incomplete* is `'fill'`, the last group will contain instances of + *fillvalue*. + + >>> list(grouper('ABCDEFG', 3, incomplete='fill', fillvalue='x')) [('A', 'B', 'C'), ('D', 'E', 'F'), ('G', 'x', 'x')] + When *incomplete* is `'ignore'`, the last group will not be emitted. + + >>> list(grouper('ABCDEFG', 3, incomplete='ignore', fillvalue='x')) + [('A', 'B', 'C'), ('D', 'E', 'F')] + + When *incomplete* is `'strict'`, a subclass of `ValueError` will be raised. + + >>> it = grouper('ABCDEFG', 3, incomplete='strict') + >>> list(it) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + UnequalIterablesError + """ - if isinstance(iterable, int): - warnings.warn( - "grouper expects iterable as first parameter", DeprecationWarning - ) - n, iterable = iterable, n args = [iter(iterable)] * n - return zip_longest(fillvalue=fillvalue, *args) + if incomplete == 'fill': + return zip_longest(*args, fillvalue=fillvalue) + if incomplete == 'strict': + return _zip_equal(*args) + if incomplete == 'ignore': + return zip(*args) + else: + raise ValueError('Expected fill, strict, or ignore') def roundrobin(*iterables): @@ -658,11 +738,12 @@ def before_and_after(predicate, it): transition.append(elem) return - def remainder_iterator(): - yield from transition - yield from it + # Note: this is different from itertools recipes to allow nesting + # before_and_after remainders into before_and_after again. See tests + # for an example. + remainder_iterator = chain(transition, it) - return true_iterator(), remainder_iterator() + return true_iterator(), remainder_iterator def triplewise(iterable): @@ -696,3 +777,65 @@ def sliding_window(iterable, n): for x in it: window.append(x) yield tuple(window) + + +def subslices(iterable): + """Return all contiguous non-empty subslices of *iterable*. + + >>> list(subslices('ABC')) + [['A'], ['A', 'B'], ['A', 'B', 'C'], ['B'], ['B', 'C'], ['C']] + + This is similar to :func:`substrings`, but emits items in a different + order. + """ + seq = list(iterable) + slices = starmap(slice, combinations(range(len(seq) + 1), 2)) + return map(operator.getitem, repeat(seq), slices) + + +def polynomial_from_roots(roots): + """Compute a polynomial's coefficients from its roots. + + >>> roots = [5, -4, 3] # (x - 5) * (x + 4) * (x - 3) + >>> polynomial_from_roots(roots) # x^3 - 4 * x^2 - 17 * x + 60 + [1, -4, -17, 60] + """ + # Use math.prod for Python 3.8+, + prod = getattr(math, 'prod', lambda x: reduce(operator.mul, x, 1)) + roots = list(map(operator.neg, roots)) + return [ + sum(map(prod, combinations(roots, k))) for k in range(len(roots) + 1) + ] + + +def sieve(n): + """Yield the primes less than n. 
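+
+    This is a Sieve of Eratosthenes: a ``bytearray`` of flags is cleared at
+    every multiple of each prime found up to ``sqrt(n)``.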
+ + >>> list(sieve(30)) + [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + """ + isqrt = getattr(math, 'isqrt', lambda x: int(math.sqrt(x))) + limit = isqrt(n) + 1 + data = bytearray([1]) * n + data[:2] = 0, 0 + for p in compress(range(limit), data): + data[p + p : n : p] = bytearray(len(range(p + p, n, p))) + + return compress(count(), data) + + +def batched(iterable, n): + """Batch data into lists of length *n*. The last batch may be shorter. + + >>> list(batched('ABCDEFG', 3)) + [['A', 'B', 'C'], ['D', 'E', 'F'], ['G']] + + This recipe is from the ``itertools`` docs. This library also provides + :func:`chunked`, which has a different implementation. + """ + it = iter(iterable) + while True: + batch = list(islice(it, n)) + if not batch: + break + yield batch diff --git a/lib/more_itertools/recipes.pyi b/lib/more_itertools/recipes.pyi index 4648a41b..29415c5a 100644 --- a/lib/more_itertools/recipes.pyi +++ b/lib/more_itertools/recipes.pyi @@ -6,6 +6,7 @@ from typing import ( Iterator, List, Optional, + Sequence, Tuple, TypeVar, Union, @@ -39,21 +40,11 @@ def repeatfunc( func: Callable[..., _U], times: Optional[int] = ..., *args: Any ) -> Iterator[_U]: ... def pairwise(iterable: Iterable[_T]) -> Iterator[Tuple[_T, _T]]: ... -@overload def grouper( - iterable: Iterable[_T], n: int -) -> Iterator[Tuple[Optional[_T], ...]]: ... -@overload -def grouper( - iterable: Iterable[_T], n: int, fillvalue: _U -) -> Iterator[Tuple[Union[_T, _U], ...]]: ... -@overload -def grouper( # Deprecated interface - iterable: int, n: Iterable[_T] -) -> Iterator[Tuple[Optional[_T], ...]]: ... -@overload -def grouper( # Deprecated interface - iterable: int, n: Iterable[_T], fillvalue: _U + iterable: Iterable[_T], + n: int, + incomplete: str = ..., + fillvalue: _U = ..., ) -> Iterator[Tuple[Union[_T, _U], ...]]: ... def roundrobin(*iterables: Iterable[_T]) -> Iterator[_T]: ... def partition( @@ -110,3 +101,10 @@ def triplewise(iterable: Iterable[_T]) -> Iterator[Tuple[_T, _T, _T]]: ... def sliding_window( iterable: Iterable[_T], n: int ) -> Iterator[Tuple[_T, ...]]: ... +def subslices(iterable: Iterable[_T]) -> Iterator[List[_T]]: ... +def polynomial_from_roots(roots: Sequence[int]) -> List[int]: ... +def sieve(n: int) -> Iterator[int]: ... +def batched( + iterable: Iterable[_T], + n: int, +) -> Iterator[List[_T]]: ... diff --git a/lib/pydantic/__init__.py b/lib/pydantic/__init__.py new file mode 100644 index 00000000..3bf1418f --- /dev/null +++ b/lib/pydantic/__init__.py @@ -0,0 +1,131 @@ +# flake8: noqa +from . import dataclasses +from .annotated_types import create_model_from_namedtuple, create_model_from_typeddict +from .class_validators import root_validator, validator +from .config import BaseConfig, ConfigDict, Extra +from .decorator import validate_arguments +from .env_settings import BaseSettings +from .error_wrappers import ValidationError +from .errors import * +from .fields import Field, PrivateAttr, Required +from .main import * +from .networks import * +from .parse import Protocol +from .tools import * +from .types import * +from .version import VERSION, compiled + +__version__ = VERSION + +# WARNING __all__ from .errors is not included here, it will be removed as an export here in v2 +# please use "from pydantic.errors import ..." 
instead +__all__ = [ + # annotated types utils + 'create_model_from_namedtuple', + 'create_model_from_typeddict', + # dataclasses + 'dataclasses', + # class_validators + 'root_validator', + 'validator', + # config + 'BaseConfig', + 'ConfigDict', + 'Extra', + # decorator + 'validate_arguments', + # env_settings + 'BaseSettings', + # error_wrappers + 'ValidationError', + # fields + 'Field', + 'Required', + # main + 'BaseModel', + 'create_model', + 'validate_model', + # network + 'AnyUrl', + 'AnyHttpUrl', + 'FileUrl', + 'HttpUrl', + 'stricturl', + 'EmailStr', + 'NameEmail', + 'IPvAnyAddress', + 'IPvAnyInterface', + 'IPvAnyNetwork', + 'PostgresDsn', + 'CockroachDsn', + 'AmqpDsn', + 'RedisDsn', + 'MongoDsn', + 'KafkaDsn', + 'validate_email', + # parse + 'Protocol', + # tools + 'parse_file_as', + 'parse_obj_as', + 'parse_raw_as', + 'schema_of', + 'schema_json_of', + # types + 'NoneStr', + 'NoneBytes', + 'StrBytes', + 'NoneStrBytes', + 'StrictStr', + 'ConstrainedBytes', + 'conbytes', + 'ConstrainedList', + 'conlist', + 'ConstrainedSet', + 'conset', + 'ConstrainedFrozenSet', + 'confrozenset', + 'ConstrainedStr', + 'constr', + 'PyObject', + 'ConstrainedInt', + 'conint', + 'PositiveInt', + 'NegativeInt', + 'NonNegativeInt', + 'NonPositiveInt', + 'ConstrainedFloat', + 'confloat', + 'PositiveFloat', + 'NegativeFloat', + 'NonNegativeFloat', + 'NonPositiveFloat', + 'FiniteFloat', + 'ConstrainedDecimal', + 'condecimal', + 'ConstrainedDate', + 'condate', + 'UUID1', + 'UUID3', + 'UUID4', + 'UUID5', + 'FilePath', + 'DirectoryPath', + 'Json', + 'JsonWrapper', + 'SecretField', + 'SecretStr', + 'SecretBytes', + 'StrictBool', + 'StrictBytes', + 'StrictInt', + 'StrictFloat', + 'PaymentCardNumber', + 'PrivateAttr', + 'ByteSize', + 'PastDate', + 'FutureDate', + # version + 'compiled', + 'VERSION', +] diff --git a/lib/pydantic/_hypothesis_plugin.py b/lib/pydantic/_hypothesis_plugin.py new file mode 100644 index 00000000..a56d2b98 --- /dev/null +++ b/lib/pydantic/_hypothesis_plugin.py @@ -0,0 +1,386 @@ +""" +Register Hypothesis strategies for Pydantic custom types. + +This enables fully-automatic generation of test data for most Pydantic classes. + +Note that this module has *no* runtime impact on Pydantic itself; instead it +is registered as a setuptools entry point and Hypothesis will import it if +Pydantic is installed. See also: + +https://hypothesis.readthedocs.io/en/latest/strategies.html#registering-strategies-via-setuptools-entry-points +https://hypothesis.readthedocs.io/en/latest/data.html#hypothesis.strategies.register_type_strategy +https://hypothesis.readthedocs.io/en/latest/strategies.html#interaction-with-pytest-cov +https://pydantic-docs.helpmanual.io/usage/types/#pydantic-types + +Note that because our motivation is to *improve user experience*, the strategies +are always sound (never generate invalid data) but sacrifice completeness for +maintainability (ie may be unable to generate some tricky but valid data). + +Finally, this module makes liberal use of `# type: ignore[]` pragmas. +This is because Hypothesis annotates `register_type_strategy()` with +`(T, SearchStrategy[T])`, but in most cases we register e.g. `ConstrainedInt` +to generate instances of the builtin `int` type which match the constraints. 
+""" + +import contextlib +import datetime +import ipaddress +import json +import math +from fractions import Fraction +from typing import Callable, Dict, Type, Union, cast, overload + +import hypothesis.strategies as st + +import pydantic +import pydantic.color +import pydantic.types +from pydantic.utils import lenient_issubclass + +# FilePath and DirectoryPath are explicitly unsupported, as we'd have to create +# them on-disk, and that's unsafe in general without being told *where* to do so. +# +# URLs are unsupported because it's easy for users to define their own strategy for +# "normal" URLs, and hard for us to define a general strategy which includes "weird" +# URLs but doesn't also have unpredictable performance problems. +# +# conlist() and conset() are unsupported for now, because the workarounds for +# Cython and Hypothesis to handle parametrized generic types are incompatible. +# Once Cython can support 'normal' generics we'll revisit this. + +# Emails +try: + import email_validator +except ImportError: # pragma: no cover + pass +else: + + def is_valid_email(s: str) -> bool: + # Hypothesis' st.emails() occasionally generates emails like 0@A0--0.ac + # that are invalid according to email-validator, so we filter those out. + try: + email_validator.validate_email(s, check_deliverability=False) + return True + except email_validator.EmailNotValidError: # pragma: no cover + return False + + # Note that these strategies deliberately stay away from any tricky Unicode + # or other encoding issues; we're just trying to generate *something* valid. + st.register_type_strategy(pydantic.EmailStr, st.emails().filter(is_valid_email)) # type: ignore[arg-type] + st.register_type_strategy( + pydantic.NameEmail, + st.builds( + '{} <{}>'.format, # type: ignore[arg-type] + st.from_regex('[A-Za-z0-9_]+( [A-Za-z0-9_]+){0,5}', fullmatch=True), + st.emails().filter(is_valid_email), + ), + ) + +# PyObject - dotted names, in this case taken from the math module. 
+st.register_type_strategy( + pydantic.PyObject, # type: ignore[arg-type] + st.sampled_from( + [cast(pydantic.PyObject, f'math.{name}') for name in sorted(vars(math)) if not name.startswith('_')] + ), +) + +# CSS3 Colors; as name, hex, rgb(a) tuples or strings, or hsl strings +_color_regexes = ( + '|'.join( + ( + pydantic.color.r_hex_short, + pydantic.color.r_hex_long, + pydantic.color.r_rgb, + pydantic.color.r_rgba, + pydantic.color.r_hsl, + pydantic.color.r_hsla, + ) + ) + # Use more precise regex patterns to avoid value-out-of-range errors + .replace(pydantic.color._r_sl, r'(?:(\d\d?(?:\.\d+)?|100(?:\.0+)?)%)') + .replace(pydantic.color._r_alpha, r'(?:(0(?:\.\d+)?|1(?:\.0+)?|\.\d+|\d{1,2}%))') + .replace(pydantic.color._r_255, r'(?:((?:\d|\d\d|[01]\d\d|2[0-4]\d|25[0-4])(?:\.\d+)?|255(?:\.0+)?))') +) +st.register_type_strategy( + pydantic.color.Color, + st.one_of( + st.sampled_from(sorted(pydantic.color.COLORS_BY_NAME)), + st.tuples( + st.integers(0, 255), + st.integers(0, 255), + st.integers(0, 255), + st.none() | st.floats(0, 1) | st.floats(0, 100).map('{}%'.format), + ), + st.from_regex(_color_regexes, fullmatch=True), + ), +) + + +# Card numbers, valid according to the Luhn algorithm + + +def add_luhn_digit(card_number: str) -> str: + # See https://en.wikipedia.org/wiki/Luhn_algorithm + for digit in '0123456789': + with contextlib.suppress(Exception): + pydantic.PaymentCardNumber.validate_luhn_check_digit(card_number + digit) + return card_number + digit + raise AssertionError('Unreachable') # pragma: no cover + + +card_patterns = ( + # Note that these patterns omit the Luhn check digit; that's added by the function above + '4[0-9]{14}', # Visa + '5[12345][0-9]{13}', # Mastercard + '3[47][0-9]{12}', # American Express + '[0-26-9][0-9]{10,17}', # other (incomplete to avoid overlap) +) +st.register_type_strategy( + pydantic.PaymentCardNumber, + st.from_regex('|'.join(card_patterns), fullmatch=True).map(add_luhn_digit), # type: ignore[arg-type] +) + +# UUIDs +st.register_type_strategy(pydantic.UUID1, st.uuids(version=1)) +st.register_type_strategy(pydantic.UUID3, st.uuids(version=3)) +st.register_type_strategy(pydantic.UUID4, st.uuids(version=4)) +st.register_type_strategy(pydantic.UUID5, st.uuids(version=5)) + +# Secrets +st.register_type_strategy(pydantic.SecretBytes, st.binary().map(pydantic.SecretBytes)) +st.register_type_strategy(pydantic.SecretStr, st.text().map(pydantic.SecretStr)) + +# IP addresses, networks, and interfaces +st.register_type_strategy(pydantic.IPvAnyAddress, st.ip_addresses()) # type: ignore[arg-type] +st.register_type_strategy( + pydantic.IPvAnyInterface, + st.from_type(ipaddress.IPv4Interface) | st.from_type(ipaddress.IPv6Interface), # type: ignore[arg-type] +) +st.register_type_strategy( + pydantic.IPvAnyNetwork, + st.from_type(ipaddress.IPv4Network) | st.from_type(ipaddress.IPv6Network), # type: ignore[arg-type] +) + +# We hook into the con***() functions and the ConstrainedNumberMeta metaclass, +# so here we only have to register subclasses for other constrained types which +# don't go via those mechanisms. Then there are the registration hooks below. +st.register_type_strategy(pydantic.StrictBool, st.booleans()) +st.register_type_strategy(pydantic.StrictStr, st.text()) + + +# Constrained-type resolver functions +# +# For these ones, we actually want to inspect the type in order to work out a +# satisfying strategy. 
First up, the machinery for tracking resolver functions: + +RESOLVERS: Dict[type, Callable[[type], st.SearchStrategy]] = {} # type: ignore[type-arg] + + +@overload +def _registered(typ: Type[pydantic.types.T]) -> Type[pydantic.types.T]: + pass + + +@overload +def _registered(typ: pydantic.types.ConstrainedNumberMeta) -> pydantic.types.ConstrainedNumberMeta: + pass + + +def _registered( + typ: Union[Type[pydantic.types.T], pydantic.types.ConstrainedNumberMeta] +) -> Union[Type[pydantic.types.T], pydantic.types.ConstrainedNumberMeta]: + # This function replaces the version in `pydantic.types`, in order to + # effect the registration of new constrained types so that Hypothesis + # can generate valid examples. + pydantic.types._DEFINED_TYPES.add(typ) + for supertype, resolver in RESOLVERS.items(): + if issubclass(typ, supertype): + st.register_type_strategy(typ, resolver(typ)) # type: ignore + return typ + raise NotImplementedError(f'Unknown type {typ!r} has no resolver to register') # pragma: no cover + + +def resolves( + typ: Union[type, pydantic.types.ConstrainedNumberMeta] +) -> Callable[[Callable[..., st.SearchStrategy]], Callable[..., st.SearchStrategy]]: # type: ignore[type-arg] + def inner(f): # type: ignore + assert f not in RESOLVERS + RESOLVERS[typ] = f + return f + + return inner + + +# Type-to-strategy resolver functions + + +@resolves(pydantic.JsonWrapper) +def resolve_json(cls): # type: ignore[no-untyped-def] + try: + inner = st.none() if cls.inner_type is None else st.from_type(cls.inner_type) + except Exception: # pragma: no cover + finite = st.floats(allow_infinity=False, allow_nan=False) + inner = st.recursive( + base=st.one_of(st.none(), st.booleans(), st.integers(), finite, st.text()), + extend=lambda x: st.lists(x) | st.dictionaries(st.text(), x), # type: ignore + ) + inner_type = getattr(cls, 'inner_type', None) + return st.builds( + cls.inner_type.json if lenient_issubclass(inner_type, pydantic.BaseModel) else json.dumps, + inner, + ensure_ascii=st.booleans(), + indent=st.none() | st.integers(0, 16), + sort_keys=st.booleans(), + ) + + +@resolves(pydantic.ConstrainedBytes) +def resolve_conbytes(cls): # type: ignore[no-untyped-def] # pragma: no cover + min_size = cls.min_length or 0 + max_size = cls.max_length + if not cls.strip_whitespace: + return st.binary(min_size=min_size, max_size=max_size) + # Fun with regex to ensure we neither start nor end with whitespace + repeats = '{{{},{}}}'.format( + min_size - 2 if min_size > 2 else 0, + max_size - 2 if (max_size or 0) > 2 else '', + ) + if min_size >= 2: + pattern = rf'\W.{repeats}\W' + elif min_size == 1: + pattern = rf'\W(.{repeats}\W)?' + else: + assert min_size == 0 + pattern = rf'(\W(.{repeats}\W)?)?' 
+ return st.from_regex(pattern.encode(), fullmatch=True) + + +@resolves(pydantic.ConstrainedDecimal) +def resolve_condecimal(cls): # type: ignore[no-untyped-def] + min_value = cls.ge + max_value = cls.le + if cls.gt is not None: + assert min_value is None, 'Set `gt` or `ge`, but not both' + min_value = cls.gt + if cls.lt is not None: + assert max_value is None, 'Set `lt` or `le`, but not both' + max_value = cls.lt + s = st.decimals(min_value, max_value, allow_nan=False, places=cls.decimal_places) + if cls.lt is not None: + s = s.filter(lambda d: d < cls.lt) + if cls.gt is not None: + s = s.filter(lambda d: cls.gt < d) + return s + + +@resolves(pydantic.ConstrainedFloat) +def resolve_confloat(cls): # type: ignore[no-untyped-def] + min_value = cls.ge + max_value = cls.le + exclude_min = False + exclude_max = False + + if cls.gt is not None: + assert min_value is None, 'Set `gt` or `ge`, but not both' + min_value = cls.gt + exclude_min = True + if cls.lt is not None: + assert max_value is None, 'Set `lt` or `le`, but not both' + max_value = cls.lt + exclude_max = True + + if cls.multiple_of is None: + return st.floats(min_value, max_value, exclude_min=exclude_min, exclude_max=exclude_max, allow_nan=False) + + if min_value is not None: + min_value = math.ceil(min_value / cls.multiple_of) + if exclude_min: + min_value = min_value + 1 + if max_value is not None: + assert max_value >= cls.multiple_of, 'Cannot build model with max value smaller than multiple of' + max_value = math.floor(max_value / cls.multiple_of) + if exclude_max: + max_value = max_value - 1 + + return st.integers(min_value, max_value).map(lambda x: x * cls.multiple_of) + + +@resolves(pydantic.ConstrainedInt) +def resolve_conint(cls): # type: ignore[no-untyped-def] + min_value = cls.ge + max_value = cls.le + if cls.gt is not None: + assert min_value is None, 'Set `gt` or `ge`, but not both' + min_value = cls.gt + 1 + if cls.lt is not None: + assert max_value is None, 'Set `lt` or `le`, but not both' + max_value = cls.lt - 1 + + if cls.multiple_of is None or cls.multiple_of == 1: + return st.integers(min_value, max_value) + + # These adjustments and the .map handle integer-valued multiples, while the + # .filter handles trickier cases as for confloat. 
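+    # (e.g. with ge=3 and multiple_of=5, min_value becomes ceil(3 / 5) = 1,
+    # so the strategy draws 1, 2, 3, ... and maps them to 5, 10, 15, ...)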
+ if min_value is not None: + min_value = math.ceil(Fraction(min_value) / Fraction(cls.multiple_of)) + if max_value is not None: + max_value = math.floor(Fraction(max_value) / Fraction(cls.multiple_of)) + return st.integers(min_value, max_value).map(lambda x: x * cls.multiple_of) + + +@resolves(pydantic.ConstrainedDate) +def resolve_condate(cls): # type: ignore[no-untyped-def] + if cls.ge is not None: + assert cls.gt is None, 'Set `gt` or `ge`, but not both' + min_value = cls.ge + elif cls.gt is not None: + min_value = cls.gt + datetime.timedelta(days=1) + else: + min_value = datetime.date.min + if cls.le is not None: + assert cls.lt is None, 'Set `lt` or `le`, but not both' + max_value = cls.le + elif cls.lt is not None: + max_value = cls.lt - datetime.timedelta(days=1) + else: + max_value = datetime.date.max + return st.dates(min_value, max_value) + + +@resolves(pydantic.ConstrainedStr) +def resolve_constr(cls): # type: ignore[no-untyped-def] # pragma: no cover + min_size = cls.min_length or 0 + max_size = cls.max_length + + if cls.regex is None and not cls.strip_whitespace: + return st.text(min_size=min_size, max_size=max_size) + + if cls.regex is not None: + strategy = st.from_regex(cls.regex) + if cls.strip_whitespace: + strategy = strategy.filter(lambda s: s == s.strip()) + elif cls.strip_whitespace: + repeats = '{{{},{}}}'.format( + min_size - 2 if min_size > 2 else 0, + max_size - 2 if (max_size or 0) > 2 else '', + ) + if min_size >= 2: + strategy = st.from_regex(rf'\W.{repeats}\W') + elif min_size == 1: + strategy = st.from_regex(rf'\W(.{repeats}\W)?') + else: + assert min_size == 0 + strategy = st.from_regex(rf'(\W(.{repeats}\W)?)?') + + if min_size == 0 and max_size is None: + return strategy + elif max_size is None: + return strategy.filter(lambda s: min_size <= len(s)) + return strategy.filter(lambda s: min_size <= len(s) <= max_size) + + +# Finally, register all previously-defined types, and patch in our new function +for typ in list(pydantic.types._DEFINED_TYPES): + _registered(typ) +pydantic.types._registered = _registered +st.register_type_strategy(pydantic.Json, resolve_json) diff --git a/lib/pydantic/annotated_types.py b/lib/pydantic/annotated_types.py new file mode 100644 index 00000000..d333457f --- /dev/null +++ b/lib/pydantic/annotated_types.py @@ -0,0 +1,72 @@ +import sys +from typing import TYPE_CHECKING, Any, Dict, FrozenSet, NamedTuple, Type + +from .fields import Required +from .main import BaseModel, create_model +from .typing import is_typeddict, is_typeddict_special + +if TYPE_CHECKING: + from typing_extensions import TypedDict + +if sys.version_info < (3, 11): + + def is_legacy_typeddict(typeddict_cls: Type['TypedDict']) -> bool: # type: ignore[valid-type] + return is_typeddict(typeddict_cls) and type(typeddict_cls).__module__ == 'typing' + +else: + + def is_legacy_typeddict(_: Any) -> Any: + return False + + +def create_model_from_typeddict( + # Mypy bug: `Type[TypedDict]` is resolved as `Any` https://github.com/python/mypy/issues/11030 + typeddict_cls: Type['TypedDict'], # type: ignore[valid-type] + **kwargs: Any, +) -> Type['BaseModel']: + """ + Create a `BaseModel` based on the fields of a `TypedDict`. + Since `typing.TypedDict` in Python 3.8 does not store runtime information about optional keys, + we raise an error if this happens (see https://bugs.python.org/issue38834). 
+ """ + field_definitions: Dict[str, Any] + + # Best case scenario: with python 3.9+ or when `TypedDict` is imported from `typing_extensions` + if not hasattr(typeddict_cls, '__required_keys__'): + raise TypeError( + 'You should use `typing_extensions.TypedDict` instead of `typing.TypedDict` with Python < 3.9.2. ' + 'Without it, there is no way to differentiate required and optional fields when subclassed.' + ) + + if is_legacy_typeddict(typeddict_cls) and any( + is_typeddict_special(t) for t in typeddict_cls.__annotations__.values() + ): + raise TypeError( + 'You should use `typing_extensions.TypedDict` instead of `typing.TypedDict` with Python < 3.11. ' + 'Without it, there is no way to reflect Required/NotRequired keys.' + ) + + required_keys: FrozenSet[str] = typeddict_cls.__required_keys__ # type: ignore[attr-defined] + field_definitions = { + field_name: (field_type, Required if field_name in required_keys else None) + for field_name, field_type in typeddict_cls.__annotations__.items() + } + + return create_model(typeddict_cls.__name__, **kwargs, **field_definitions) + + +def create_model_from_namedtuple(namedtuple_cls: Type['NamedTuple'], **kwargs: Any) -> Type['BaseModel']: + """ + Create a `BaseModel` based on the fields of a named tuple. + A named tuple can be created with `typing.NamedTuple` and declared annotations + but also with `collections.namedtuple`, in this case we consider all fields + to have type `Any`. + """ + # With python 3.10+, `__annotations__` always exists but can be empty hence the `getattr... or...` logic + namedtuple_annotations: Dict[str, Type[Any]] = getattr(namedtuple_cls, '__annotations__', None) or { + k: Any for k in namedtuple_cls._fields + } + field_definitions: Dict[str, Any] = { + field_name: (field_type, Required) for field_name, field_type in namedtuple_annotations.items() + } + return create_model(namedtuple_cls.__name__, **kwargs, **field_definitions) diff --git a/lib/pydantic/class_validators.py b/lib/pydantic/class_validators.py new file mode 100644 index 00000000..87190610 --- /dev/null +++ b/lib/pydantic/class_validators.py @@ -0,0 +1,342 @@ +import warnings +from collections import ChainMap +from functools import wraps +from itertools import chain +from types import FunctionType +from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Type, Union, overload + +from .errors import ConfigError +from .typing import AnyCallable +from .utils import ROOT_KEY, in_ipython + +if TYPE_CHECKING: + from .typing import AnyClassMethod + + +class Validator: + __slots__ = 'func', 'pre', 'each_item', 'always', 'check_fields', 'skip_on_failure' + + def __init__( + self, + func: AnyCallable, + pre: bool = False, + each_item: bool = False, + always: bool = False, + check_fields: bool = False, + skip_on_failure: bool = False, + ): + self.func = func + self.pre = pre + self.each_item = each_item + self.always = always + self.check_fields = check_fields + self.skip_on_failure = skip_on_failure + + +if TYPE_CHECKING: + from inspect import Signature + + from .config import BaseConfig + from .fields import ModelField + from .types import ModelOrDc + + ValidatorCallable = Callable[[Optional[ModelOrDc], Any, Dict[str, Any], ModelField, Type[BaseConfig]], Any] + ValidatorsList = List[ValidatorCallable] + ValidatorListDict = Dict[str, List[Validator]] + +_FUNCS: Set[str] = set() +VALIDATOR_CONFIG_KEY = '__validator_config__' +ROOT_VALIDATOR_CONFIG_KEY = '__root_validator_config__' + + +def validator( + *fields: str, + pre: bool = 
False,
+    each_item: bool = False,
+    always: bool = False,
+    check_fields: bool = True,
+    whole: bool = None,
+    allow_reuse: bool = False,
+) -> Callable[[AnyCallable], 'AnyClassMethod']:
+    """
+    Decorate methods on the class indicating that they should be used to validate fields
+    :param fields: which field(s) the method should be called on
+    :param pre: whether or not this validator should be called before the standard validators (else after)
+    :param each_item: for complex objects (sets, lists etc.) whether to validate individual elements rather than the
+      whole object
+    :param always: whether this method and other validators should be called even if the value is missing
+    :param check_fields: whether to check that the fields actually exist on the model
+    :param allow_reuse: whether to track and raise an error if another validator refers to the decorated function
+    """
+    if not fields:
+        raise ConfigError('validator with no fields specified')
+    elif isinstance(fields[0], FunctionType):
+        raise ConfigError(
+            "validators should be used with fields and keyword arguments, not bare. "  # noqa: Q000
+            "E.g. usage should be `@validator('<field_name>', ...)`"
+        )
+    elif not all(isinstance(field, str) for field in fields):
+        raise ConfigError(
+            "validator fields should be passed as separate string args. "  # noqa: Q000
+            "E.g. usage should be `@validator('<field_name_1>', '<field_name_2>', ...)`"
+        )
+
+    if whole is not None:
+        warnings.warn(
+            'The "whole" keyword argument is deprecated, use "each_item" (inverse meaning, default False) instead',
+            DeprecationWarning,
+        )
+        assert each_item is False, '"each_item" and "whole" conflict, remove "whole"'
+        each_item = not whole
+
+    def dec(f: AnyCallable) -> 'AnyClassMethod':
+        f_cls = _prepare_validator(f, allow_reuse)
+        setattr(
+            f_cls,
+            VALIDATOR_CONFIG_KEY,
+            (
+                fields,
+                Validator(func=f_cls.__func__, pre=pre, each_item=each_item, always=always, check_fields=check_fields),
+            ),
+        )
+        return f_cls
+
+    return dec
+
+
+@overload
+def root_validator(_func: AnyCallable) -> 'AnyClassMethod':
+    ...
+
+
+@overload
+def root_validator(
+    *, pre: bool = False, allow_reuse: bool = False, skip_on_failure: bool = False
+) -> Callable[[AnyCallable], 'AnyClassMethod']:
+    ...
+
+
+def root_validator(
+    _func: Optional[AnyCallable] = None, *, pre: bool = False, allow_reuse: bool = False, skip_on_failure: bool = False
+) -> Union['AnyClassMethod', Callable[[AnyCallable], 'AnyClassMethod']]:
+    """
+    Decorate methods on a model indicating that they should be used to validate (and perhaps modify) data either
+    before or after standard model parsing/validation is performed.
+    """
+    if _func:
+        f_cls = _prepare_validator(_func, allow_reuse)
+        setattr(
+            f_cls, ROOT_VALIDATOR_CONFIG_KEY, Validator(func=f_cls.__func__, pre=pre, skip_on_failure=skip_on_failure)
+        )
+        return f_cls
+
+    def dec(f: AnyCallable) -> 'AnyClassMethod':
+        f_cls = _prepare_validator(f, allow_reuse)
+        setattr(
+            f_cls, ROOT_VALIDATOR_CONFIG_KEY, Validator(func=f_cls.__func__, pre=pre, skip_on_failure=skip_on_failure)
+        )
+        return f_cls
+
+    return dec
+
+
+def _prepare_validator(function: AnyCallable, allow_reuse: bool) -> 'AnyClassMethod':
+    """
+    Avoid validators with duplicated names since without this, validators can be overwritten silently
+    which generally isn't the intended behaviour, don't run in ipython (see #312) or if allow_reuse is False.
+    """
+    f_cls = function if isinstance(function, classmethod) else classmethod(function)
+    if not in_ipython() and not allow_reuse:
+        ref = f_cls.__func__.__module__ + '.'
+ f_cls.__func__.__qualname__ + if ref in _FUNCS: + raise ConfigError(f'duplicate validator function "{ref}"; if this is intended, set `allow_reuse=True`') + _FUNCS.add(ref) + return f_cls + + +class ValidatorGroup: + def __init__(self, validators: 'ValidatorListDict') -> None: + self.validators = validators + self.used_validators = {'*'} + + def get_validators(self, name: str) -> Optional[Dict[str, Validator]]: + self.used_validators.add(name) + validators = self.validators.get(name, []) + if name != ROOT_KEY: + validators += self.validators.get('*', []) + if validators: + return {v.func.__name__: v for v in validators} + else: + return None + + def check_for_unused(self) -> None: + unused_validators = set( + chain.from_iterable( + (v.func.__name__ for v in self.validators[f] if v.check_fields) + for f in (self.validators.keys() - self.used_validators) + ) + ) + if unused_validators: + fn = ', '.join(unused_validators) + raise ConfigError( + f"Validators defined with incorrect fields: {fn} " # noqa: Q000 + f"(use check_fields=False if you're inheriting from the model and intended this)" + ) + + +def extract_validators(namespace: Dict[str, Any]) -> Dict[str, List[Validator]]: + validators: Dict[str, List[Validator]] = {} + for var_name, value in namespace.items(): + validator_config = getattr(value, VALIDATOR_CONFIG_KEY, None) + if validator_config: + fields, v = validator_config + for field in fields: + if field in validators: + validators[field].append(v) + else: + validators[field] = [v] + return validators + + +def extract_root_validators(namespace: Dict[str, Any]) -> Tuple[List[AnyCallable], List[Tuple[bool, AnyCallable]]]: + from inspect import signature + + pre_validators: List[AnyCallable] = [] + post_validators: List[Tuple[bool, AnyCallable]] = [] + for name, value in namespace.items(): + validator_config: Optional[Validator] = getattr(value, ROOT_VALIDATOR_CONFIG_KEY, None) + if validator_config: + sig = signature(validator_config.func) + args = list(sig.parameters.keys()) + if args[0] == 'self': + raise ConfigError( + f'Invalid signature for root validator {name}: {sig}, "self" not permitted as first argument, ' + f'should be: (cls, values).' + ) + if len(args) != 2: + raise ConfigError(f'Invalid signature for root validator {name}: {sig}, should be: (cls, values).') + # check function signature + if validator_config.pre: + pre_validators.append(validator_config.func) + else: + post_validators.append((validator_config.skip_on_failure, validator_config.func)) + return pre_validators, post_validators + + +def inherit_validators(base_validators: 'ValidatorListDict', validators: 'ValidatorListDict') -> 'ValidatorListDict': + for field, field_validators in base_validators.items(): + if field not in validators: + validators[field] = [] + validators[field] += field_validators + return validators + + +def make_generic_validator(validator: AnyCallable) -> 'ValidatorCallable': + """ + Make a generic function which calls a validator with the right arguments. + + Unfortunately other approaches (eg. return a partial of a function that builds the arguments) is slow, + hence this laborious way of doing things. + + It's done like this so validators don't all need **kwargs in their signature, eg. any combination of + the arguments "values", "fields" and/or "config" are permitted. 
+ """ + from inspect import signature + + sig = signature(validator) + args = list(sig.parameters.keys()) + first_arg = args.pop(0) + if first_arg == 'self': + raise ConfigError( + f'Invalid signature for validator {validator}: {sig}, "self" not permitted as first argument, ' + f'should be: (cls, value, values, config, field), "values", "config" and "field" are all optional.' + ) + elif first_arg == 'cls': + # assume the second argument is value + return wraps(validator)(_generic_validator_cls(validator, sig, set(args[1:]))) + else: + # assume the first argument was value which has already been removed + return wraps(validator)(_generic_validator_basic(validator, sig, set(args))) + + +def prep_validators(v_funcs: Iterable[AnyCallable]) -> 'ValidatorsList': + return [make_generic_validator(f) for f in v_funcs if f] + + +all_kwargs = {'values', 'field', 'config'} + + +def _generic_validator_cls(validator: AnyCallable, sig: 'Signature', args: Set[str]) -> 'ValidatorCallable': + # assume the first argument is value + has_kwargs = False + if 'kwargs' in args: + has_kwargs = True + args -= {'kwargs'} + + if not args.issubset(all_kwargs): + raise ConfigError( + f'Invalid signature for validator {validator}: {sig}, should be: ' + f'(cls, value, values, config, field), "values", "config" and "field" are all optional.' + ) + + if has_kwargs: + return lambda cls, v, values, field, config: validator(cls, v, values=values, field=field, config=config) + elif args == set(): + return lambda cls, v, values, field, config: validator(cls, v) + elif args == {'values'}: + return lambda cls, v, values, field, config: validator(cls, v, values=values) + elif args == {'field'}: + return lambda cls, v, values, field, config: validator(cls, v, field=field) + elif args == {'config'}: + return lambda cls, v, values, field, config: validator(cls, v, config=config) + elif args == {'values', 'field'}: + return lambda cls, v, values, field, config: validator(cls, v, values=values, field=field) + elif args == {'values', 'config'}: + return lambda cls, v, values, field, config: validator(cls, v, values=values, config=config) + elif args == {'field', 'config'}: + return lambda cls, v, values, field, config: validator(cls, v, field=field, config=config) + else: + # args == {'values', 'field', 'config'} + return lambda cls, v, values, field, config: validator(cls, v, values=values, field=field, config=config) + + +def _generic_validator_basic(validator: AnyCallable, sig: 'Signature', args: Set[str]) -> 'ValidatorCallable': + has_kwargs = False + if 'kwargs' in args: + has_kwargs = True + args -= {'kwargs'} + + if not args.issubset(all_kwargs): + raise ConfigError( + f'Invalid signature for validator {validator}: {sig}, should be: ' + f'(value, values, config, field), "values", "config" and "field" are all optional.' 
+        )
+
+    if has_kwargs:
+        return lambda cls, v, values, field, config: validator(v, values=values, field=field, config=config)
+    elif args == set():
+        return lambda cls, v, values, field, config: validator(v)
+    elif args == {'values'}:
+        return lambda cls, v, values, field, config: validator(v, values=values)
+    elif args == {'field'}:
+        return lambda cls, v, values, field, config: validator(v, field=field)
+    elif args == {'config'}:
+        return lambda cls, v, values, field, config: validator(v, config=config)
+    elif args == {'values', 'field'}:
+        return lambda cls, v, values, field, config: validator(v, values=values, field=field)
+    elif args == {'values', 'config'}:
+        return lambda cls, v, values, field, config: validator(v, values=values, config=config)
+    elif args == {'field', 'config'}:
+        return lambda cls, v, values, field, config: validator(v, field=field, config=config)
+    else:
+        # args == {'values', 'field', 'config'}
+        return lambda cls, v, values, field, config: validator(v, values=values, field=field, config=config)
+
+
+def gather_all_validators(type_: 'ModelOrDc') -> Dict[str, 'AnyClassMethod']:
+    all_attributes = ChainMap(*[cls.__dict__ for cls in type_.__mro__])  # type: ignore[arg-type,var-annotated]
+    return {
+        k: v
+        for k, v in all_attributes.items()
+        if hasattr(v, VALIDATOR_CONFIG_KEY) or hasattr(v, ROOT_VALIDATOR_CONFIG_KEY)
+    }
diff --git a/lib/pydantic/color.py b/lib/pydantic/color.py
new file mode 100644
index 00000000..6fdc9fb1
--- /dev/null
+++ b/lib/pydantic/color.py
@@ -0,0 +1,494 @@
+"""
+Color definitions are used as per CSS3 specification:
+http://www.w3.org/TR/css3-color/#svg-color
+
+A few colors have multiple names referring to the same colors, eg. `grey` and `gray` or `aqua` and `cyan`.
+
+In these cases the LAST color when sorted alphabetically takes preference,
+eg. Color((0, 255, 255)).as_named() == 'cyan' because "cyan" comes after "aqua".
+"""
+import math
+import re
+from colorsys import hls_to_rgb, rgb_to_hls
+from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union, cast
+
+from .errors import ColorError
+from .utils import Representation, almost_equal_floats
+
+if TYPE_CHECKING:
+    from .typing import CallableGenerator, ReprArgs
+
+ColorTuple = Union[Tuple[int, int, int], Tuple[int, int, int, float]]
+ColorType = Union[ColorTuple, str]
+HslColorTuple = Union[Tuple[float, float, float], Tuple[float, float, float, float]]
+
+
+class RGBA:
+    """
+    Internal use only as a representation of a color.
+    """
+
+    __slots__ = 'r', 'g', 'b', 'alpha', '_tuple'
+
+    def __init__(self, r: float, g: float, b: float, alpha: Optional[float]):
+        self.r = r
+        self.g = g
+        self.b = b
+        self.alpha = alpha
+
+        self._tuple: Tuple[float, float, float, Optional[float]] = (r, g, b, alpha)
+
+    def __getitem__(self, item: Any) -> Any:
+        return self._tuple[item]
+
+
+# these are not compiled here to avoid import slowdown, they'll be compiled the first time they're used, then cached
+r_hex_short = r'\s*(?:#|0x)?([0-9a-f])([0-9a-f])([0-9a-f])([0-9a-f])?\s*'
+r_hex_long = r'\s*(?:#|0x)?([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})([0-9a-f]{2})?\s*'
+_r_255 = r'(\d{1,3}(?:\.\d+)?)'
+_r_comma = r'\s*,\s*'
+r_rgb = fr'\s*rgb\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}\)\s*'
+_r_alpha = r'(\d(?:\.\d+)?|\.\d+|\d{1,2}%)'
+r_rgba = fr'\s*rgba\(\s*{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_255}{_r_comma}{_r_alpha}\s*\)\s*'
+_r_h = r'(-?\d+(?:\.\d+)?|-?\.\d+)(deg|rad|turn)?'
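+# the hue pattern accepts an optional unit: plain numbers or 'deg' are degrees,
+# 'rad' is radians and 'turn' is whole turns (see parse_hsl below)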
+_r_sl = r'(\d{1,3}(?:\.\d+)?)%'
+r_hsl = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}\s*\)\s*'
+r_hsla = fr'\s*hsl\(\s*{_r_h}{_r_comma}{_r_sl}{_r_comma}{_r_sl}{_r_comma}{_r_alpha}\s*\)\s*'
+
+# colors where the two hex characters are the same, if all colors match this the short version of hex colors can be used
+repeat_colors = {int(c * 2, 16) for c in '0123456789abcdef'}
+rads = 2 * math.pi
+
+
+class Color(Representation):
+    __slots__ = '_original', '_rgba'
+
+    def __init__(self, value: ColorType) -> None:
+        self._rgba: RGBA
+        self._original: ColorType
+        if isinstance(value, (tuple, list)):
+            self._rgba = parse_tuple(value)
+        elif isinstance(value, str):
+            self._rgba = parse_str(value)
+        elif isinstance(value, Color):
+            self._rgba = value._rgba
+            value = value._original
+        else:
+            raise ColorError(reason='value must be a tuple, list or string')
+
+        # if we've got here value must be a valid color
+        self._original = value
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        field_schema.update(type='string', format='color')
+
+    def original(self) -> ColorType:
+        """
+        Original value passed to Color
+        """
+        return self._original
+
+    def as_named(self, *, fallback: bool = False) -> str:
+        if self._rgba.alpha is None:
+            rgb = cast(Tuple[int, int, int], self.as_rgb_tuple())
+            try:
+                return COLORS_BY_VALUE[rgb]
+            except KeyError as e:
+                if fallback:
+                    return self.as_hex()
+                else:
+                    raise ValueError('no named color found, use fallback=True, as_hex() or as_rgb()') from e
+        else:
+            return self.as_hex()
+
+    def as_hex(self) -> str:
+        """
+        Hex string representing the color can be 3, 4, 6 or 8 characters depending on whether
+        a "short" representation of the color is possible and whether there's an alpha channel.
+        """
+        values = [float_to_255(c) for c in self._rgba[:3]]
+        if self._rgba.alpha is not None:
+            values.append(float_to_255(self._rgba.alpha))
+
+        as_hex = ''.join(f'{v:02x}' for v in values)
+        if all(c in repeat_colors for c in values):
+            as_hex = ''.join(as_hex[c] for c in range(0, len(as_hex), 2))
+        return '#' + as_hex
+
+    def as_rgb(self) -> str:
+        """
+        Color as an rgb(<r>, <g>, <b>) or rgba(<r>, <g>, <b>, <a>) string.
+        """
+        if self._rgba.alpha is None:
+            return f'rgb({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)})'
+        else:
+            return (
+                f'rgba({float_to_255(self._rgba.r)}, {float_to_255(self._rgba.g)}, {float_to_255(self._rgba.b)}, '
+                f'{round(self._alpha_float(), 2)})'
+            )
+
+    def as_rgb_tuple(self, *, alpha: Optional[bool] = None) -> ColorTuple:
+        """
+        Color as an RGB or RGBA tuple; red, green and blue are in the range 0 to 255, alpha if included is
+        in the range 0 to 1.
+
+        :param alpha: whether to include the alpha channel, options are
+          None - (default) include alpha only if it's set (e.g. not None)
+          True - always include alpha,
+          False - always omit alpha,
+        """
+        r, g, b = (float_to_255(c) for c in self._rgba[:3])
+        if alpha is None:
+            if self._rgba.alpha is None:
+                return r, g, b
+            else:
+                return r, g, b, self._alpha_float()
+        elif alpha:
+            return r, g, b, self._alpha_float()
+        else:
+            # alpha is False
+            return r, g, b
+
+    def as_hsl(self) -> str:
+        """
+        Color as an hsl(<h>, <s>, <l>) or hsl(<h>, <s>, <l>, <a>) string.
+ """ + if self._rgba.alpha is None: + h, s, li = self.as_hsl_tuple(alpha=False) # type: ignore + return f'hsl({h * 360:0.0f}, {s:0.0%}, {li:0.0%})' + else: + h, s, li, a = self.as_hsl_tuple(alpha=True) # type: ignore + return f'hsl({h * 360:0.0f}, {s:0.0%}, {li:0.0%}, {round(a, 2)})' + + def as_hsl_tuple(self, *, alpha: Optional[bool] = None) -> HslColorTuple: + """ + Color as an HSL or HSLA tuple, e.g. hue, saturation, lightness and optionally alpha; all elements are in + the range 0 to 1. + + NOTE: this is HSL as used in HTML and most other places, not HLS as used in python's colorsys. + + :param alpha: whether to include the alpha channel, options are + None - (default) include alpha only if it's set (e.g. not None) + True - always include alpha, + False - always omit alpha, + """ + h, l, s = rgb_to_hls(self._rgba.r, self._rgba.g, self._rgba.b) + if alpha is None: + if self._rgba.alpha is None: + return h, s, l + else: + return h, s, l, self._alpha_float() + if alpha: + return h, s, l, self._alpha_float() + else: + # alpha is False + return h, s, l + + def _alpha_float(self) -> float: + return 1 if self._rgba.alpha is None else self._rgba.alpha + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield cls + + def __str__(self) -> str: + return self.as_named(fallback=True) + + def __repr_args__(self) -> 'ReprArgs': + return [(None, self.as_named(fallback=True))] + [('rgb', self.as_rgb_tuple())] # type: ignore + + def __eq__(self, other: Any) -> bool: + return isinstance(other, Color) and self.as_rgb_tuple() == other.as_rgb_tuple() + + def __hash__(self) -> int: + return hash(self.as_rgb_tuple()) + + +def parse_tuple(value: Tuple[Any, ...]) -> RGBA: + """ + Parse a tuple or list as a color. + """ + if len(value) == 3: + r, g, b = (parse_color_value(v) for v in value) + return RGBA(r, g, b, None) + elif len(value) == 4: + r, g, b = (parse_color_value(v) for v in value[:3]) + return RGBA(r, g, b, parse_float_alpha(value[3])) + else: + raise ColorError(reason='tuples must have length 3 or 4') + + +def parse_str(value: str) -> RGBA: + """ + Parse a string to an RGBA tuple, trying the following formats (in this order): + * named color, see COLORS_BY_NAME below + * hex short eg. `fff` (prefix can be `#`, `0x` or nothing) + * hex long eg. 
`ffffff` (prefix can be `#`, `0x` or nothing)
+    * `rgb(<r>, <g>, <b>)`
+    * `rgba(<r>, <g>, <b>, <a>)`
+    """
+    value_lower = value.lower()
+    try:
+        r, g, b = COLORS_BY_NAME[value_lower]
+    except KeyError:
+        pass
+    else:
+        return ints_to_rgba(r, g, b, None)
+
+    m = re.fullmatch(r_hex_short, value_lower)
+    if m:
+        *rgb, a = m.groups()
+        r, g, b = (int(v * 2, 16) for v in rgb)
+        if a:
+            alpha: Optional[float] = int(a * 2, 16) / 255
+        else:
+            alpha = None
+        return ints_to_rgba(r, g, b, alpha)
+
+    m = re.fullmatch(r_hex_long, value_lower)
+    if m:
+        *rgb, a = m.groups()
+        r, g, b = (int(v, 16) for v in rgb)
+        if a:
+            alpha = int(a, 16) / 255
+        else:
+            alpha = None
+        return ints_to_rgba(r, g, b, alpha)
+
+    m = re.fullmatch(r_rgb, value_lower)
+    if m:
+        return ints_to_rgba(*m.groups(), None)  # type: ignore
+
+    m = re.fullmatch(r_rgba, value_lower)
+    if m:
+        return ints_to_rgba(*m.groups())  # type: ignore
+
+    m = re.fullmatch(r_hsl, value_lower)
+    if m:
+        h, h_units, s, l_ = m.groups()
+        return parse_hsl(h, h_units, s, l_)
+
+    m = re.fullmatch(r_hsla, value_lower)
+    if m:
+        h, h_units, s, l_, a = m.groups()
+        return parse_hsl(h, h_units, s, l_, parse_float_alpha(a))
+
+    raise ColorError(reason='string not recognised as a valid color')
+
+
+def ints_to_rgba(r: Union[int, str], g: Union[int, str], b: Union[int, str], alpha: Optional[float]) -> RGBA:
+    return RGBA(parse_color_value(r), parse_color_value(g), parse_color_value(b), parse_float_alpha(alpha))
+
+
+def parse_color_value(value: Union[int, str], max_val: int = 255) -> float:
+    """
+    Parse a value checking it's a valid int in the range 0 to max_val and divide by max_val to give a number
+    in the range 0 to 1
+    """
+    try:
+        color = float(value)
+    except ValueError:
+        raise ColorError(reason='color values must be a valid number')
+    if 0 <= color <= max_val:
+        return color / max_val
+    else:
+        raise ColorError(reason=f'color values must be in the range 0 to {max_val}')
+
+
+def parse_float_alpha(value: Union[None, str, float, int]) -> Optional[float]:
+    """
+    Parse a value checking it's a valid float in the range 0 to 1
+    """
+    if value is None:
+        return None
+    try:
+        if isinstance(value, str) and value.endswith('%'):
+            alpha = float(value[:-1]) / 100
+        else:
+            alpha = float(value)
+    except ValueError:
+        raise ColorError(reason='alpha values must be a valid float')
+
+    if almost_equal_floats(alpha, 1):
+        return None
+    elif 0 <= alpha <= 1:
+        return alpha
+    else:
+        raise ColorError(reason='alpha values must be in the range 0 to 1')
+
+
+def parse_hsl(h: str, h_units: str, sat: str, light: str, alpha: Optional[float] = None) -> RGBA:
+    """
+    Parse raw hue, saturation, lightness and alpha values and convert to RGBA.
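+
+    Hue may carry a 'deg' (default), 'rad' or 'turn' unit; saturation and
+    lightness are percentages and are scaled to the range 0 to 1.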
+ """ + s_value, l_value = parse_color_value(sat, 100), parse_color_value(light, 100) + + h_value = float(h) + if h_units in {None, 'deg'}: + h_value = h_value % 360 / 360 + elif h_units == 'rad': + h_value = h_value % rads / rads + else: + # turns + h_value = h_value % 1 + + r, g, b = hls_to_rgb(h_value, l_value, s_value) + return RGBA(r, g, b, alpha) + + +def float_to_255(c: float) -> int: + return int(round(c * 255)) + + +COLORS_BY_NAME = { + 'aliceblue': (240, 248, 255), + 'antiquewhite': (250, 235, 215), + 'aqua': (0, 255, 255), + 'aquamarine': (127, 255, 212), + 'azure': (240, 255, 255), + 'beige': (245, 245, 220), + 'bisque': (255, 228, 196), + 'black': (0, 0, 0), + 'blanchedalmond': (255, 235, 205), + 'blue': (0, 0, 255), + 'blueviolet': (138, 43, 226), + 'brown': (165, 42, 42), + 'burlywood': (222, 184, 135), + 'cadetblue': (95, 158, 160), + 'chartreuse': (127, 255, 0), + 'chocolate': (210, 105, 30), + 'coral': (255, 127, 80), + 'cornflowerblue': (100, 149, 237), + 'cornsilk': (255, 248, 220), + 'crimson': (220, 20, 60), + 'cyan': (0, 255, 255), + 'darkblue': (0, 0, 139), + 'darkcyan': (0, 139, 139), + 'darkgoldenrod': (184, 134, 11), + 'darkgray': (169, 169, 169), + 'darkgreen': (0, 100, 0), + 'darkgrey': (169, 169, 169), + 'darkkhaki': (189, 183, 107), + 'darkmagenta': (139, 0, 139), + 'darkolivegreen': (85, 107, 47), + 'darkorange': (255, 140, 0), + 'darkorchid': (153, 50, 204), + 'darkred': (139, 0, 0), + 'darksalmon': (233, 150, 122), + 'darkseagreen': (143, 188, 143), + 'darkslateblue': (72, 61, 139), + 'darkslategray': (47, 79, 79), + 'darkslategrey': (47, 79, 79), + 'darkturquoise': (0, 206, 209), + 'darkviolet': (148, 0, 211), + 'deeppink': (255, 20, 147), + 'deepskyblue': (0, 191, 255), + 'dimgray': (105, 105, 105), + 'dimgrey': (105, 105, 105), + 'dodgerblue': (30, 144, 255), + 'firebrick': (178, 34, 34), + 'floralwhite': (255, 250, 240), + 'forestgreen': (34, 139, 34), + 'fuchsia': (255, 0, 255), + 'gainsboro': (220, 220, 220), + 'ghostwhite': (248, 248, 255), + 'gold': (255, 215, 0), + 'goldenrod': (218, 165, 32), + 'gray': (128, 128, 128), + 'green': (0, 128, 0), + 'greenyellow': (173, 255, 47), + 'grey': (128, 128, 128), + 'honeydew': (240, 255, 240), + 'hotpink': (255, 105, 180), + 'indianred': (205, 92, 92), + 'indigo': (75, 0, 130), + 'ivory': (255, 255, 240), + 'khaki': (240, 230, 140), + 'lavender': (230, 230, 250), + 'lavenderblush': (255, 240, 245), + 'lawngreen': (124, 252, 0), + 'lemonchiffon': (255, 250, 205), + 'lightblue': (173, 216, 230), + 'lightcoral': (240, 128, 128), + 'lightcyan': (224, 255, 255), + 'lightgoldenrodyellow': (250, 250, 210), + 'lightgray': (211, 211, 211), + 'lightgreen': (144, 238, 144), + 'lightgrey': (211, 211, 211), + 'lightpink': (255, 182, 193), + 'lightsalmon': (255, 160, 122), + 'lightseagreen': (32, 178, 170), + 'lightskyblue': (135, 206, 250), + 'lightslategray': (119, 136, 153), + 'lightslategrey': (119, 136, 153), + 'lightsteelblue': (176, 196, 222), + 'lightyellow': (255, 255, 224), + 'lime': (0, 255, 0), + 'limegreen': (50, 205, 50), + 'linen': (250, 240, 230), + 'magenta': (255, 0, 255), + 'maroon': (128, 0, 0), + 'mediumaquamarine': (102, 205, 170), + 'mediumblue': (0, 0, 205), + 'mediumorchid': (186, 85, 211), + 'mediumpurple': (147, 112, 219), + 'mediumseagreen': (60, 179, 113), + 'mediumslateblue': (123, 104, 238), + 'mediumspringgreen': (0, 250, 154), + 'mediumturquoise': (72, 209, 204), + 'mediumvioletred': (199, 21, 133), + 'midnightblue': (25, 25, 112), + 'mintcream': (245, 255, 250), + 'mistyrose': (255, 228, 
225), + 'moccasin': (255, 228, 181), + 'navajowhite': (255, 222, 173), + 'navy': (0, 0, 128), + 'oldlace': (253, 245, 230), + 'olive': (128, 128, 0), + 'olivedrab': (107, 142, 35), + 'orange': (255, 165, 0), + 'orangered': (255, 69, 0), + 'orchid': (218, 112, 214), + 'palegoldenrod': (238, 232, 170), + 'palegreen': (152, 251, 152), + 'paleturquoise': (175, 238, 238), + 'palevioletred': (219, 112, 147), + 'papayawhip': (255, 239, 213), + 'peachpuff': (255, 218, 185), + 'peru': (205, 133, 63), + 'pink': (255, 192, 203), + 'plum': (221, 160, 221), + 'powderblue': (176, 224, 230), + 'purple': (128, 0, 128), + 'red': (255, 0, 0), + 'rosybrown': (188, 143, 143), + 'royalblue': (65, 105, 225), + 'saddlebrown': (139, 69, 19), + 'salmon': (250, 128, 114), + 'sandybrown': (244, 164, 96), + 'seagreen': (46, 139, 87), + 'seashell': (255, 245, 238), + 'sienna': (160, 82, 45), + 'silver': (192, 192, 192), + 'skyblue': (135, 206, 235), + 'slateblue': (106, 90, 205), + 'slategray': (112, 128, 144), + 'slategrey': (112, 128, 144), + 'snow': (255, 250, 250), + 'springgreen': (0, 255, 127), + 'steelblue': (70, 130, 180), + 'tan': (210, 180, 140), + 'teal': (0, 128, 128), + 'thistle': (216, 191, 216), + 'tomato': (255, 99, 71), + 'turquoise': (64, 224, 208), + 'violet': (238, 130, 238), + 'wheat': (245, 222, 179), + 'white': (255, 255, 255), + 'whitesmoke': (245, 245, 245), + 'yellow': (255, 255, 0), + 'yellowgreen': (154, 205, 50), +} + +COLORS_BY_VALUE = {v: k for k, v in COLORS_BY_NAME.items()} diff --git a/lib/pydantic/config.py b/lib/pydantic/config.py new file mode 100644 index 00000000..74687ca0 --- /dev/null +++ b/lib/pydantic/config.py @@ -0,0 +1,192 @@ +import json +from enum import Enum +from typing import TYPE_CHECKING, Any, Callable, Dict, ForwardRef, Optional, Tuple, Type, Union + +from typing_extensions import Literal, Protocol + +from .typing import AnyArgTCallable, AnyCallable +from .utils import GetterDict +from .version import compiled + +if TYPE_CHECKING: + from typing import overload + + from .fields import ModelField + from .main import BaseModel + + ConfigType = Type['BaseConfig'] + + class SchemaExtraCallable(Protocol): + @overload + def __call__(self, schema: Dict[str, Any]) -> None: + pass + + @overload + def __call__(self, schema: Dict[str, Any], model_class: Type[BaseModel]) -> None: + pass + +else: + SchemaExtraCallable = Callable[..., None] + +__all__ = 'BaseConfig', 'ConfigDict', 'get_config', 'Extra', 'inherit_config', 'prepare_config' + + +class Extra(str, Enum): + allow = 'allow' + ignore = 'ignore' + forbid = 'forbid' + + +# https://github.com/cython/cython/issues/4003 +# Will be fixed with Cython 3 but still in alpha right now +if not compiled: + from typing_extensions import TypedDict + + class ConfigDict(TypedDict, total=False): + title: Optional[str] + anystr_lower: bool + anystr_strip_whitespace: bool + min_anystr_length: int + max_anystr_length: Optional[int] + validate_all: bool + extra: Extra + allow_mutation: bool + frozen: bool + allow_population_by_field_name: bool + use_enum_values: bool + fields: Dict[str, Union[str, Dict[str, str]]] + validate_assignment: bool + error_msg_templates: Dict[str, str] + arbitrary_types_allowed: bool + orm_mode: bool + getter_dict: Type[GetterDict] + alias_generator: Optional[Callable[[str], str]] + keep_untouched: Tuple[type, ...] 
+ schema_extra: Union[Dict[str, object], 'SchemaExtraCallable'] + json_loads: Callable[[str], object] + json_dumps: AnyArgTCallable[str] + json_encoders: Dict[Type[object], AnyCallable] + underscore_attrs_are_private: bool + allow_inf_nan: bool + + # whether or not inherited models as fields should be reconstructed as base model + copy_on_model_validation: bool + # whether dataclass `__post_init__` should be run after validation + post_init_call: Literal['before_validation', 'after_validation'] + +else: + ConfigDict = dict # type: ignore + + +class BaseConfig: + title: Optional[str] = None + anystr_lower: bool = False + anystr_upper: bool = False + anystr_strip_whitespace: bool = False + min_anystr_length: int = 0 + max_anystr_length: Optional[int] = None + validate_all: bool = False + extra: Extra = Extra.ignore + allow_mutation: bool = True + frozen: bool = False + allow_population_by_field_name: bool = False + use_enum_values: bool = False + fields: Dict[str, Union[str, Dict[str, str]]] = {} + validate_assignment: bool = False + error_msg_templates: Dict[str, str] = {} + arbitrary_types_allowed: bool = False + orm_mode: bool = False + getter_dict: Type[GetterDict] = GetterDict + alias_generator: Optional[Callable[[str], str]] = None + keep_untouched: Tuple[type, ...] = () + schema_extra: Union[Dict[str, Any], 'SchemaExtraCallable'] = {} + json_loads: Callable[[str], Any] = json.loads + json_dumps: Callable[..., str] = json.dumps + json_encoders: Dict[Union[Type[Any], str, ForwardRef], AnyCallable] = {} + underscore_attrs_are_private: bool = False + allow_inf_nan: bool = True + + # whether inherited models as fields should be reconstructed as base model, + # and whether such a copy should be shallow or deep + copy_on_model_validation: Literal['none', 'deep', 'shallow'] = 'shallow' + + # whether `Union` should check all allowed types before even trying to coerce + smart_union: bool = False + # whether dataclass `__post_init__` should be run before or after validation + post_init_call: Literal['before_validation', 'after_validation'] = 'before_validation' + + @classmethod + def get_field_info(cls, name: str) -> Dict[str, Any]: + """ + Get properties of FieldInfo from the `fields` property of the config class. + """ + + fields_value = cls.fields.get(name) + + if isinstance(fields_value, str): + field_info: Dict[str, Any] = {'alias': fields_value} + elif isinstance(fields_value, dict): + field_info = fields_value + else: + field_info = {} + + if 'alias' in field_info: + field_info.setdefault('alias_priority', 2) + + if field_info.get('alias_priority', 0) <= 1 and cls.alias_generator: + alias = cls.alias_generator(name) + if not isinstance(alias, str): + raise TypeError(f'Config.alias_generator must return str, not {alias.__class__}') + field_info.update(alias=alias, alias_priority=1) + return field_info + + @classmethod + def prepare_field(cls, field: 'ModelField') -> None: + """ + Optional hook to check or modify fields during model creation. + """ + pass + + +def get_config(config: Union[ConfigDict, Type[object], None]) -> Type[BaseConfig]: + if config is None: + return BaseConfig + + else: + config_dict = ( + config + if isinstance(config, dict) + else {k: getattr(config, k) for k in dir(config) if not k.startswith('__')} + ) + + class Config(BaseConfig): + ... 
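+
+        # Each ConfigDict key (or each public attribute of the given class) is
+        # copied onto the freshly created subclass below; e.g. (illustrative)
+        # get_config({'extra': Extra.forbid}) returns a BaseConfig subclass
+        # whose `extra` is Extra.forbid.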
+
+        for k, v in config_dict.items():
+            setattr(Config, k, v)
+        return Config
+
+
+def inherit_config(self_config: 'ConfigType', parent_config: 'ConfigType', **namespace: Any) -> 'ConfigType':
+    if not self_config:
+        base_classes: Tuple['ConfigType', ...] = (parent_config,)
+    elif self_config == parent_config:
+        base_classes = (self_config,)
+    else:
+        base_classes = self_config, parent_config
+
+    namespace['json_encoders'] = {
+        **getattr(parent_config, 'json_encoders', {}),
+        **getattr(self_config, 'json_encoders', {}),
+        **namespace.get('json_encoders', {}),
+    }
+
+    return type('Config', base_classes, namespace)
+
+
+def prepare_config(config: Type[BaseConfig], cls_name: str) -> None:
+    if not isinstance(config.extra, Extra):
+        try:
+            config.extra = Extra(config.extra)
+        except ValueError:
+            raise ValueError(f'"{cls_name}": {config.extra} is not a valid value for "extra"')
diff --git a/lib/pydantic/dataclasses.py b/lib/pydantic/dataclasses.py
new file mode 100644
index 00000000..68331127
--- /dev/null
+++ b/lib/pydantic/dataclasses.py
@@ -0,0 +1,479 @@
+"""
+The main purpose is to enhance stdlib dataclasses by adding validation.
+A pydantic dataclass can be generated from scratch or from a stdlib one.
+
+Behind the scenes, a pydantic dataclass is just like a regular one on which we attach
+a `BaseModel` and magic methods to trigger the validation of the data.
+`__init__` and `__post_init__` are hence overridden and have extra logic to be
+able to validate input data.
+
+When a pydantic dataclass is generated from scratch, it's just a plain dataclass
+with validation triggered at initialization.
+
+The tricky part is for stdlib dataclasses that are converted into pydantic ones afterwards, e.g.
+
+```py
+@dataclasses.dataclass
+class M:
+    x: int
+
+ValidatedM = pydantic.dataclasses.dataclass(M)
+```
+
+We indeed still want to support equality, hashing, repr, ... as if it were the stdlib one!
+
+```py
+assert isinstance(ValidatedM(x=1), M)
+assert ValidatedM(x=1) == M(x=1)
+```
+
+This means we **don't want to create a new dataclass that inherits from it**.
+The trick is to create a wrapper around `M` that will act as a proxy to trigger
+validation without altering default `M` behaviour.
+""" +import sys +from contextlib import contextmanager +from functools import wraps +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Dict, + Generator, + Optional, + Set, + Type, + TypeVar, + Union, + overload, +) + +from typing_extensions import dataclass_transform + +from .class_validators import gather_all_validators +from .config import BaseConfig, ConfigDict, Extra, get_config +from .error_wrappers import ValidationError +from .errors import DataclassTypeError +from .fields import Field, FieldInfo, Required, Undefined +from .main import create_model, validate_model +from .utils import ClassAttribute + +if TYPE_CHECKING: + from .main import BaseModel + from .typing import CallableGenerator, NoArgAnyCallable + + DataclassT = TypeVar('DataclassT', bound='Dataclass') + + DataclassClassOrWrapper = Union[Type['Dataclass'], 'DataclassProxy'] + + class Dataclass: + # stdlib attributes + __dataclass_fields__: ClassVar[Dict[str, Any]] + __dataclass_params__: ClassVar[Any] # in reality `dataclasses._DataclassParams` + __post_init__: ClassVar[Callable[..., None]] + + # Added by pydantic + __pydantic_run_validation__: ClassVar[bool] + __post_init_post_parse__: ClassVar[Callable[..., None]] + __pydantic_initialised__: ClassVar[bool] + __pydantic_model__: ClassVar[Type[BaseModel]] + __pydantic_validate_values__: ClassVar[Callable[['Dataclass'], None]] + __pydantic_has_field_info_default__: ClassVar[bool] # whether a `pydantic.Field` is used as default value + + def __init__(self, *args: object, **kwargs: object) -> None: + pass + + @classmethod + def __get_validators__(cls: Type['Dataclass']) -> 'CallableGenerator': + pass + + @classmethod + def __validate__(cls: Type['DataclassT'], v: Any) -> 'DataclassT': + pass + + +__all__ = [ + 'dataclass', + 'set_validation', + 'create_pydantic_model_from_dataclass', + 'is_builtin_dataclass', + 'make_dataclass_validator', +] + +_T = TypeVar('_T') + +if sys.version_info >= (3, 10): + + @dataclass_transform(kw_only_default=True, field_descriptors=(Field, FieldInfo)) + @overload + def dataclass( + *, + init: bool = True, + repr: bool = True, + eq: bool = True, + order: bool = False, + unsafe_hash: bool = False, + frozen: bool = False, + config: Union[ConfigDict, Type[object], None] = None, + validate_on_init: Optional[bool] = None, + kw_only: bool = ..., + ) -> Callable[[Type[_T]], 'DataclassClassOrWrapper']: + ... + + @dataclass_transform(kw_only_default=True, field_descriptors=(Field, FieldInfo)) + @overload + def dataclass( + _cls: Type[_T], + *, + init: bool = True, + repr: bool = True, + eq: bool = True, + order: bool = False, + unsafe_hash: bool = False, + frozen: bool = False, + config: Union[ConfigDict, Type[object], None] = None, + validate_on_init: Optional[bool] = None, + kw_only: bool = ..., + ) -> 'DataclassClassOrWrapper': + ... + +else: + + @dataclass_transform(kw_only_default=True, field_descriptors=(Field, FieldInfo)) + @overload + def dataclass( + *, + init: bool = True, + repr: bool = True, + eq: bool = True, + order: bool = False, + unsafe_hash: bool = False, + frozen: bool = False, + config: Union[ConfigDict, Type[object], None] = None, + validate_on_init: Optional[bool] = None, + ) -> Callable[[Type[_T]], 'DataclassClassOrWrapper']: + ... 
+ + @dataclass_transform(kw_only_default=True, field_descriptors=(Field, FieldInfo)) + @overload + def dataclass( + _cls: Type[_T], + *, + init: bool = True, + repr: bool = True, + eq: bool = True, + order: bool = False, + unsafe_hash: bool = False, + frozen: bool = False, + config: Union[ConfigDict, Type[object], None] = None, + validate_on_init: Optional[bool] = None, + ) -> 'DataclassClassOrWrapper': + ... + + +@dataclass_transform(kw_only_default=True, field_descriptors=(Field, FieldInfo)) +def dataclass( + _cls: Optional[Type[_T]] = None, + *, + init: bool = True, + repr: bool = True, + eq: bool = True, + order: bool = False, + unsafe_hash: bool = False, + frozen: bool = False, + config: Union[ConfigDict, Type[object], None] = None, + validate_on_init: Optional[bool] = None, + kw_only: bool = False, +) -> Union[Callable[[Type[_T]], 'DataclassClassOrWrapper'], 'DataclassClassOrWrapper']: + """ + Like the python standard lib dataclasses but with type validation. + The result is either a pydantic dataclass that will validate input data + or a wrapper that will trigger validation around a stdlib dataclass + to avoid modifying it directly + """ + the_config = get_config(config) + + def wrap(cls: Type[Any]) -> 'DataclassClassOrWrapper': + import dataclasses + + if is_builtin_dataclass(cls) and _extra_dc_args(_cls) == _extra_dc_args(_cls.__bases__[0]): # type: ignore + dc_cls_doc = '' + dc_cls = DataclassProxy(cls) + default_validate_on_init = False + else: + dc_cls_doc = cls.__doc__ or '' # needs to be done before generating dataclass + if sys.version_info >= (3, 10): + dc_cls = dataclasses.dataclass( + cls, + init=init, + repr=repr, + eq=eq, + order=order, + unsafe_hash=unsafe_hash, + frozen=frozen, + kw_only=kw_only, + ) + else: + dc_cls = dataclasses.dataclass( # type: ignore + cls, init=init, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen + ) + default_validate_on_init = True + + should_validate_on_init = default_validate_on_init if validate_on_init is None else validate_on_init + _add_pydantic_validation_attributes(cls, the_config, should_validate_on_init, dc_cls_doc) + dc_cls.__pydantic_model__.__try_update_forward_refs__(**{cls.__name__: cls}) + return dc_cls + + if _cls is None: + return wrap + + return wrap(_cls) + + +@contextmanager +def set_validation(cls: Type['DataclassT'], value: bool) -> Generator[Type['DataclassT'], None, None]: + original_run_validation = cls.__pydantic_run_validation__ + try: + cls.__pydantic_run_validation__ = value + yield cls + finally: + cls.__pydantic_run_validation__ = original_run_validation + + +class DataclassProxy: + __slots__ = '__dataclass__' + + def __init__(self, dc_cls: Type['Dataclass']) -> None: + object.__setattr__(self, '__dataclass__', dc_cls) + + def __call__(self, *args: Any, **kwargs: Any) -> Any: + with set_validation(self.__dataclass__, True): + return self.__dataclass__(*args, **kwargs) + + def __getattr__(self, name: str) -> Any: + return getattr(self.__dataclass__, name) + + def __instancecheck__(self, instance: Any) -> bool: + return isinstance(instance, self.__dataclass__) + + +def _add_pydantic_validation_attributes( # noqa: C901 (ignore complexity) + dc_cls: Type['Dataclass'], + config: Type[BaseConfig], + validate_on_init: bool, + dc_cls_doc: str, +) -> None: + """ + We need to replace the right method. 
If no `__post_init__` has been set in the stdlib dataclass + it won't even exist (code is generated on the fly by `dataclasses`) + By default, we run validation after `__init__` or `__post_init__` if defined + """ + init = dc_cls.__init__ + + @wraps(init) + def handle_extra_init(self: 'Dataclass', *args: Any, **kwargs: Any) -> None: + if config.extra == Extra.ignore: + init(self, *args, **{k: v for k, v in kwargs.items() if k in self.__dataclass_fields__}) + + elif config.extra == Extra.allow: + for k, v in kwargs.items(): + self.__dict__.setdefault(k, v) + init(self, *args, **{k: v for k, v in kwargs.items() if k in self.__dataclass_fields__}) + + else: + init(self, *args, **kwargs) + + if hasattr(dc_cls, '__post_init__'): + post_init = dc_cls.__post_init__ + + @wraps(post_init) + def new_post_init(self: 'Dataclass', *args: Any, **kwargs: Any) -> None: + if config.post_init_call == 'before_validation': + post_init(self, *args, **kwargs) + + if self.__class__.__pydantic_run_validation__: + self.__pydantic_validate_values__() + if hasattr(self, '__post_init_post_parse__'): + self.__post_init_post_parse__(*args, **kwargs) + + if config.post_init_call == 'after_validation': + post_init(self, *args, **kwargs) + + setattr(dc_cls, '__init__', handle_extra_init) + setattr(dc_cls, '__post_init__', new_post_init) + + else: + + @wraps(init) + def new_init(self: 'Dataclass', *args: Any, **kwargs: Any) -> None: + handle_extra_init(self, *args, **kwargs) + + if self.__class__.__pydantic_run_validation__: + self.__pydantic_validate_values__() + + if hasattr(self, '__post_init_post_parse__'): + # We need to find again the initvars. To do that we use `__dataclass_fields__` instead of + # public method `dataclasses.fields` + import dataclasses + + # get all initvars and their default values + initvars_and_values: Dict[str, Any] = {} + for i, f in enumerate(self.__class__.__dataclass_fields__.values()): + if f._field_type is dataclasses._FIELD_INITVAR: # type: ignore[attr-defined] + try: + # set arg value by default + initvars_and_values[f.name] = args[i] + except IndexError: + initvars_and_values[f.name] = kwargs.get(f.name, f.default) + + self.__post_init_post_parse__(**initvars_and_values) + + setattr(dc_cls, '__init__', new_init) + + setattr(dc_cls, '__pydantic_run_validation__', ClassAttribute('__pydantic_run_validation__', validate_on_init)) + setattr(dc_cls, '__pydantic_initialised__', False) + setattr(dc_cls, '__pydantic_model__', create_pydantic_model_from_dataclass(dc_cls, config, dc_cls_doc)) + setattr(dc_cls, '__pydantic_validate_values__', _dataclass_validate_values) + setattr(dc_cls, '__validate__', classmethod(_validate_dataclass)) + setattr(dc_cls, '__get_validators__', classmethod(_get_validators)) + + if dc_cls.__pydantic_model__.__config__.validate_assignment and not dc_cls.__dataclass_params__.frozen: + setattr(dc_cls, '__setattr__', _dataclass_validate_assignment_setattr) + + +def _get_validators(cls: 'DataclassClassOrWrapper') -> 'CallableGenerator': + yield cls.__validate__ + + +def _validate_dataclass(cls: Type['DataclassT'], v: Any) -> 'DataclassT': + with set_validation(cls, True): + if isinstance(v, cls): + v.__pydantic_validate_values__() + return v + elif isinstance(v, (list, tuple)): + return cls(*v) + elif isinstance(v, dict): + return cls(**v) + else: + raise DataclassTypeError(class_name=cls.__name__) + + +def create_pydantic_model_from_dataclass( + dc_cls: Type['Dataclass'], + config: Type[Any] = BaseConfig, + dc_cls_doc: Optional[str] = None, +) -> Type['BaseModel']: + 
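+    # Build a (type, FieldInfo) pair for every dataclass field and hand the
+    # collected definitions to `create_model`, so the generated `BaseModel`
+    # mirrors the dataclass.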
+    import dataclasses
+
+    field_definitions: Dict[str, Any] = {}
+    for field in dataclasses.fields(dc_cls):
+        default: Any = Undefined
+        default_factory: Optional['NoArgAnyCallable'] = None
+        field_info: FieldInfo
+
+        if field.default is not dataclasses.MISSING:
+            default = field.default
+        elif field.default_factory is not dataclasses.MISSING:
+            default_factory = field.default_factory
+        else:
+            default = Required
+
+        if isinstance(default, FieldInfo):
+            field_info = default
+            dc_cls.__pydantic_has_field_info_default__ = True
+        else:
+            field_info = Field(default=default, default_factory=default_factory, **field.metadata)
+
+        field_definitions[field.name] = (field.type, field_info)
+
+    validators = gather_all_validators(dc_cls)
+    model: Type['BaseModel'] = create_model(
+        dc_cls.__name__,
+        __config__=config,
+        __module__=dc_cls.__module__,
+        __validators__=validators,
+        __cls_kwargs__={'__resolve_forward_refs__': False},
+        **field_definitions,
+    )
+    model.__doc__ = dc_cls_doc if dc_cls_doc is not None else dc_cls.__doc__ or ''
+    return model
+
+
+def _dataclass_validate_values(self: 'Dataclass') -> None:
+    # Validation errors can occur if this function is called twice on an already initialised dataclass.
+    # For example, if Extra.forbid is enabled, it would consider __pydantic_initialised__ an invalid extra property.
+    if getattr(self, '__pydantic_initialised__'):
+        return
+    if getattr(self, '__pydantic_has_field_info_default__', False):
+        # We need to remove `FieldInfo` values since they are not valid as input.
+        # It's ok to do that because they are obviously the default values!
+        input_data = {k: v for k, v in self.__dict__.items() if not isinstance(v, FieldInfo)}
+    else:
+        input_data = self.__dict__
+    d, _, validation_error = validate_model(self.__pydantic_model__, input_data, cls=self.__class__)
+    if validation_error:
+        raise validation_error
+    self.__dict__.update(d)
+    object.__setattr__(self, '__pydantic_initialised__', True)
+
+
+def _dataclass_validate_assignment_setattr(self: 'Dataclass', name: str, value: Any) -> None:
+    if self.__pydantic_initialised__:
+        d = dict(self.__dict__)
+        d.pop(name, None)
+        known_field = self.__pydantic_model__.__fields__.get(name, None)
+        if known_field:
+            value, error_ = known_field.validate(value, d, loc=name, cls=self.__class__)
+            if error_:
+                raise ValidationError([error_], self.__class__)
+
+    object.__setattr__(self, name, value)
+
+
+def _extra_dc_args(cls: Type[Any]) -> Set[str]:
+    return {
+        x
+        for x in dir(cls)
+        if x not in getattr(cls, '__dataclass_fields__', {}) and not (x.startswith('__') and x.endswith('__'))
+    }
+
+
+def is_builtin_dataclass(_cls: Type[Any]) -> bool:
+    """
+    Whether a class is a stdlib dataclass
+    (useful to discriminate a pydantic dataclass that is actually a wrapper around a stdlib dataclass).
+
+    We check that:
+    - `_cls` is a dataclass
+    - `_cls` is not a processed pydantic dataclass (with a BaseModel attached)
+    - `_cls` is not a pydantic dataclass inheriting directly from a stdlib dataclass
+    e.g.
+    ```
+    @dataclasses.dataclass
+    class A:
+        x: int
+
+    @pydantic.dataclasses.dataclass
+    class B(A):
+        y: int
+    ```
+    In this case, when we first check `B`, we make an extra check and look at the annotations ('y'),
+    which won't be a superset of all the dataclass fields (only the stdlib fields i.e. 'x')
+    """
+    import dataclasses
+
+    return (
+        dataclasses.is_dataclass(_cls)
+        and not hasattr(_cls, '__pydantic_model__')
+        and set(_cls.__dataclass_fields__).issuperset(set(getattr(_cls, '__annotations__', {})))
+    )
+
+
+def make_dataclass_validator(dc_cls: Type['Dataclass'], config: Type[BaseConfig]) -> 'CallableGenerator':
+    """
+    Create a pydantic.dataclass from a builtin dataclass to add type validation
+    and yield the validators.
+    It retrieves the parameters of the dataclass and forwards them to the newly created dataclass.
+    """
+    yield from _get_validators(dataclass(dc_cls, config=config, validate_on_init=False))
diff --git a/lib/pydantic/datetime_parse.py b/lib/pydantic/datetime_parse.py
new file mode 100644
index 00000000..cfd54593
--- /dev/null
+++ b/lib/pydantic/datetime_parse.py
@@ -0,0 +1,248 @@
+"""
+Functions to parse datetime objects.
+
+We're using regular expressions rather than time.strptime because:
+- They provide both validation and parsing.
+- They're more flexible for datetimes.
+- The date/datetime/time constructors produce friendlier error messages.
+
+Stolen from https://raw.githubusercontent.com/django/django/main/django/utils/dateparse.py at
+9718fa2e8abe430c3526a9278dd976443d4ae3c6
+
+Changed to:
+* use standard python datetime types not django.utils.timezone
+* raise ValueError when regex doesn't match rather than returning None
+* support parsing unix timestamps for dates and datetimes
+"""
+import re
+from datetime import date, datetime, time, timedelta, timezone
+from typing import Dict, Optional, Type, Union
+
+from . import errors
+
+date_expr = r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
+time_expr = (
+    r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
+    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
+    r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
+)
+
+date_re = re.compile(f'{date_expr}$')
+time_re = re.compile(time_expr)
+datetime_re = re.compile(f'{date_expr}[T ]{time_expr}')
+
+standard_duration_re = re.compile(
+    r'^'
+    r'(?:(?P<days>-?\d+) (days?, )?)?'
+    r'((?:(?P<hours>-?\d+):)(?=\d+:\d+))?'
+    r'(?:(?P<minutes>-?\d+):)?'
+    r'(?P<seconds>-?\d+)'
+    r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?'
+    r'$'
+)
+
+# Support the sections of ISO 8601 date representation that are accepted by timedelta
+iso8601_duration_re = re.compile(
+    r'^(?P<sign>[-+]?)'
+    r'P'
+    r'(?:(?P<days>\d+(.\d+)?)D)?'
+    r'(?:T'
+    r'(?:(?P<hours>\d+(.\d+)?)H)?'
+    r'(?:(?P<minutes>\d+(.\d+)?)M)?'
+    r'(?:(?P<seconds>\d+(.\d+)?)S)?'
+    r')?'
+    r'$'
+)
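+
+# Illustrative examples of strings the expressions above accept:
+#   date_re:              '2022-11-12'
+#   time_re:              '17:53:03.000123+05:30'
+#   datetime_re:          '2022-11-12T17:53:03Z'
+#   standard_duration_re: '3 days, 10:11:12.000123'
+#   iso8601_duration_re:  'P4DT1H15M20S'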
+
+EPOCH = datetime(1970, 1, 1)
+# if greater than this, the number is in ms, if less than or equal it's in seconds
+# (in seconds this is 11th October 2603, in ms it's 20th August 1970)
+MS_WATERSHED = int(2e10)
+# slightly more than datetime.max in ns - (datetime.max - EPOCH).total_seconds() * 1e9
+MAX_NUMBER = int(3e20)
+StrBytesIntFloat = Union[str, bytes, int, float]
+
+
+def get_numeric(value: StrBytesIntFloat, native_expected_type: str) -> Union[None, int, float]:
+    if isinstance(value, (int, float)):
+        return value
+    try:
+        return float(value)
+    except ValueError:
+        return None
+    except TypeError:
+        raise TypeError(f'invalid type; expected {native_expected_type}, string, bytes, int or float')
+
+
+def from_unix_seconds(seconds: Union[int, float]) -> datetime:
+    if seconds > MAX_NUMBER:
+        return datetime.max
+    elif seconds < -MAX_NUMBER:
+        return datetime.min
+
+    while abs(seconds) > MS_WATERSHED:
+        seconds /= 1000
+    dt = EPOCH + timedelta(seconds=seconds)
+    return dt.replace(tzinfo=timezone.utc)
+
+
+def _parse_timezone(value: Optional[str], error: Type[Exception]) -> Union[None, int, timezone]:
+    if value == 'Z':
+        return timezone.utc
+    elif value is not None:
+        offset_mins = int(value[-2:]) if len(value) > 3 else 0
+        offset = 60 * int(value[1:3]) + offset_mins
+        if value[0] == '-':
+            offset = -offset
+        try:
+            return timezone(timedelta(minutes=offset))
+        except ValueError:
+            raise error()
+    else:
+        return None
+
+
+def parse_date(value: Union[date, StrBytesIntFloat]) -> date:
+    """
+    Parse a date/int/float/string and return a datetime.date.
+
+    Raise ValueError if the input is well formatted but not a valid date.
+    Raise ValueError if the input isn't well formatted.
+    """
+    if isinstance(value, date):
+        if isinstance(value, datetime):
+            return value.date()
+        else:
+            return value
+
+    number = get_numeric(value, 'date')
+    if number is not None:
+        return from_unix_seconds(number).date()
+
+    if isinstance(value, bytes):
+        value = value.decode()
+
+    match = date_re.match(value)  # type: ignore
+    if match is None:
+        raise errors.DateError()
+
+    kw = {k: int(v) for k, v in match.groupdict().items()}
+
+    try:
+        return date(**kw)
+    except ValueError:
+        raise errors.DateError()
+
+
+def parse_time(value: Union[time, StrBytesIntFloat]) -> time:
+    """
+    Parse a time/string and return a datetime.time.
+
+    Raise ValueError if the input is well formatted but not a valid time.
+    Raise ValueError if the input isn't well formatted, in particular if it contains an offset.
+    """
+    if isinstance(value, time):
+        return value
+
+    number = get_numeric(value, 'time')
+    if number is not None:
+        if number >= 86400:
+            # doesn't make sense since the time would just loop back around to 0
+            raise errors.TimeError()
+        return (datetime.min + timedelta(seconds=number)).time()
+
+    if isinstance(value, bytes):
+        value = value.decode()
+
+    match = time_re.match(value)  # type: ignore
+    if match is None:
+        raise errors.TimeError()
+
+    kw = match.groupdict()
+    if kw['microsecond']:
+        kw['microsecond'] = kw['microsecond'].ljust(6, '0')
+
+    tzinfo = _parse_timezone(kw.pop('tzinfo'), errors.TimeError)
+    kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None}
+    kw_['tzinfo'] = tzinfo
+
+    try:
+        return time(**kw_)  # type: ignore
+    except ValueError:
+        raise errors.TimeError()
+
+
+def parse_datetime(value: Union[datetime, StrBytesIntFloat]) -> datetime:
+    """
+    Parse a datetime/int/float/string and return a datetime.datetime.
+
+    This function supports time zone offsets.
When the input contains one, + the output uses a timezone with a fixed offset from UTC. + + Raise ValueError if the input is well formatted but not a valid datetime. + Raise ValueError if the input isn't well formatted. + """ + if isinstance(value, datetime): + return value + + number = get_numeric(value, 'datetime') + if number is not None: + return from_unix_seconds(number) + + if isinstance(value, bytes): + value = value.decode() + + match = datetime_re.match(value) # type: ignore + if match is None: + raise errors.DateTimeError() + + kw = match.groupdict() + if kw['microsecond']: + kw['microsecond'] = kw['microsecond'].ljust(6, '0') + + tzinfo = _parse_timezone(kw.pop('tzinfo'), errors.DateTimeError) + kw_: Dict[str, Union[None, int, timezone]] = {k: int(v) for k, v in kw.items() if v is not None} + kw_['tzinfo'] = tzinfo + + try: + return datetime(**kw_) # type: ignore + except ValueError: + raise errors.DateTimeError() + + +def parse_duration(value: StrBytesIntFloat) -> timedelta: + """ + Parse a duration int/float/string and return a datetime.timedelta. + + The preferred format for durations in Django is '%d %H:%M:%S.%f'. + + Also supports ISO 8601 representation. + """ + if isinstance(value, timedelta): + return value + + if isinstance(value, (int, float)): + # below code requires a string + value = f'{value:f}' + elif isinstance(value, bytes): + value = value.decode() + + try: + match = standard_duration_re.match(value) or iso8601_duration_re.match(value) + except TypeError: + raise TypeError('invalid type; expected timedelta, string, bytes, int or float') + + if not match: + raise errors.DurationError() + + kw = match.groupdict() + sign = -1 if kw.pop('sign', '+') == '-' else 1 + if kw.get('microseconds'): + kw['microseconds'] = kw['microseconds'].ljust(6, '0') + + if kw.get('seconds') and kw.get('microseconds') and kw['seconds'].startswith('-'): + kw['microseconds'] = '-' + kw['microseconds'] + + kw_ = {k: float(v) for k, v in kw.items() if v is not None} + + return sign * timedelta(**kw_) diff --git a/lib/pydantic/decorator.py b/lib/pydantic/decorator.py new file mode 100644 index 00000000..089aab65 --- /dev/null +++ b/lib/pydantic/decorator.py @@ -0,0 +1,264 @@ +from functools import wraps +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Type, TypeVar, Union, overload + +from . import validator +from .config import Extra +from .errors import ConfigError +from .main import BaseModel, create_model +from .typing import get_all_type_hints +from .utils import to_camel + +__all__ = ('validate_arguments',) + +if TYPE_CHECKING: + from .typing import AnyCallable + + AnyCallableT = TypeVar('AnyCallableT', bound=AnyCallable) + ConfigType = Union[None, Type[Any], Dict[str, Any]] + + +@overload +def validate_arguments(func: None = None, *, config: 'ConfigType' = None) -> Callable[['AnyCallableT'], 'AnyCallableT']: + ... + + +@overload +def validate_arguments(func: 'AnyCallableT') -> 'AnyCallableT': + ... + + +def validate_arguments(func: Optional['AnyCallableT'] = None, *, config: 'ConfigType' = None) -> Any: + """ + Decorator to validate the arguments passed to a function. 
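+
+    A minimal illustrative sketch (hypothetical function):
+
+        @validate_arguments
+        def repeat(text: str, count: int) -> str:
+            return text * count
+
+        repeat('ab', '2')   # '2' is coerced to the int 2 and 'abab' is returned
+        repeat('ab', 'x')   # raises a pydantic.ValidationError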
+ """ + + def validate(_func: 'AnyCallable') -> 'AnyCallable': + vd = ValidatedFunction(_func, config) + + @wraps(_func) + def wrapper_function(*args: Any, **kwargs: Any) -> Any: + return vd.call(*args, **kwargs) + + wrapper_function.vd = vd # type: ignore + wrapper_function.validate = vd.init_model_instance # type: ignore + wrapper_function.raw_function = vd.raw_function # type: ignore + wrapper_function.model = vd.model # type: ignore + return wrapper_function + + if func: + return validate(func) + else: + return validate + + +ALT_V_ARGS = 'v__args' +ALT_V_KWARGS = 'v__kwargs' +V_POSITIONAL_ONLY_NAME = 'v__positional_only' +V_DUPLICATE_KWARGS = 'v__duplicate_kwargs' + + +class ValidatedFunction: + def __init__(self, function: 'AnyCallableT', config: 'ConfigType'): # noqa C901 + from inspect import Parameter, signature + + parameters: Mapping[str, Parameter] = signature(function).parameters + + if parameters.keys() & {ALT_V_ARGS, ALT_V_KWARGS, V_POSITIONAL_ONLY_NAME, V_DUPLICATE_KWARGS}: + raise ConfigError( + f'"{ALT_V_ARGS}", "{ALT_V_KWARGS}", "{V_POSITIONAL_ONLY_NAME}" and "{V_DUPLICATE_KWARGS}" ' + f'are not permitted as argument names when using the "{validate_arguments.__name__}" decorator' + ) + + self.raw_function = function + self.arg_mapping: Dict[int, str] = {} + self.positional_only_args = set() + self.v_args_name = 'args' + self.v_kwargs_name = 'kwargs' + + type_hints = get_all_type_hints(function) + takes_args = False + takes_kwargs = False + fields: Dict[str, Tuple[Any, Any]] = {} + for i, (name, p) in enumerate(parameters.items()): + if p.annotation is p.empty: + annotation = Any + else: + annotation = type_hints[name] + + default = ... if p.default is p.empty else p.default + if p.kind == Parameter.POSITIONAL_ONLY: + self.arg_mapping[i] = name + fields[name] = annotation, default + fields[V_POSITIONAL_ONLY_NAME] = List[str], None + self.positional_only_args.add(name) + elif p.kind == Parameter.POSITIONAL_OR_KEYWORD: + self.arg_mapping[i] = name + fields[name] = annotation, default + fields[V_DUPLICATE_KWARGS] = List[str], None + elif p.kind == Parameter.KEYWORD_ONLY: + fields[name] = annotation, default + elif p.kind == Parameter.VAR_POSITIONAL: + self.v_args_name = name + fields[name] = Tuple[annotation, ...], None + takes_args = True + else: + assert p.kind == Parameter.VAR_KEYWORD, p.kind + self.v_kwargs_name = name + fields[name] = Dict[str, annotation], None # type: ignore + takes_kwargs = True + + # these checks avoid a clash between "args" and a field with that name + if not takes_args and self.v_args_name in fields: + self.v_args_name = ALT_V_ARGS + + # same with "kwargs" + if not takes_kwargs and self.v_kwargs_name in fields: + self.v_kwargs_name = ALT_V_KWARGS + + if not takes_args: + # we add the field so validation below can raise the correct exception + fields[self.v_args_name] = List[Any], None + + if not takes_kwargs: + # same with kwargs + fields[self.v_kwargs_name] = Dict[Any, Any], None + + self.create_model(fields, takes_args, takes_kwargs, config) + + def init_model_instance(self, *args: Any, **kwargs: Any) -> BaseModel: + values = self.build_values(args, kwargs) + return self.model(**values) + + def call(self, *args: Any, **kwargs: Any) -> Any: + m = self.init_model_instance(*args, **kwargs) + return self.execute(m) + + def build_values(self, args: Tuple[Any, ...], kwargs: Dict[str, Any]) -> Dict[str, Any]: + values: Dict[str, Any] = {} + if args: + arg_iter = enumerate(args) + while True: + try: + i, a = next(arg_iter) + except StopIteration: + break 
+ arg_name = self.arg_mapping.get(i) + if arg_name is not None: + values[arg_name] = a + else: + values[self.v_args_name] = [a] + [a for _, a in arg_iter] + break + + var_kwargs: Dict[str, Any] = {} + wrong_positional_args = [] + duplicate_kwargs = [] + fields_alias = [ + field.alias + for name, field in self.model.__fields__.items() + if name not in (self.v_args_name, self.v_kwargs_name) + ] + non_var_fields = set(self.model.__fields__) - {self.v_args_name, self.v_kwargs_name} + for k, v in kwargs.items(): + if k in non_var_fields or k in fields_alias: + if k in self.positional_only_args: + wrong_positional_args.append(k) + if k in values: + duplicate_kwargs.append(k) + values[k] = v + else: + var_kwargs[k] = v + + if var_kwargs: + values[self.v_kwargs_name] = var_kwargs + if wrong_positional_args: + values[V_POSITIONAL_ONLY_NAME] = wrong_positional_args + if duplicate_kwargs: + values[V_DUPLICATE_KWARGS] = duplicate_kwargs + return values + + def execute(self, m: BaseModel) -> Any: + d = {k: v for k, v in m._iter() if k in m.__fields_set__ or m.__fields__[k].default_factory} + var_kwargs = d.pop(self.v_kwargs_name, {}) + + if self.v_args_name in d: + args_: List[Any] = [] + in_kwargs = False + kwargs = {} + for name, value in d.items(): + if in_kwargs: + kwargs[name] = value + elif name == self.v_args_name: + args_ += value + in_kwargs = True + else: + args_.append(value) + return self.raw_function(*args_, **kwargs, **var_kwargs) + elif self.positional_only_args: + args_ = [] + kwargs = {} + for name, value in d.items(): + if name in self.positional_only_args: + args_.append(value) + else: + kwargs[name] = value + return self.raw_function(*args_, **kwargs, **var_kwargs) + else: + return self.raw_function(**d, **var_kwargs) + + def create_model(self, fields: Dict[str, Any], takes_args: bool, takes_kwargs: bool, config: 'ConfigType') -> None: + pos_args = len(self.arg_mapping) + + class CustomConfig: + pass + + if not TYPE_CHECKING: # pragma: no branch + if isinstance(config, dict): + CustomConfig = type('Config', (), config) # noqa: F811 + elif config is not None: + CustomConfig = config # noqa: F811 + + if hasattr(CustomConfig, 'fields') or hasattr(CustomConfig, 'alias_generator'): + raise ConfigError( + 'Setting the "fields" and "alias_generator" property on custom Config for ' + '@validate_arguments is not yet supported, please remove.' 
+ ) + + class DecoratorBaseModel(BaseModel): + @validator(self.v_args_name, check_fields=False, allow_reuse=True) + def check_args(cls, v: Optional[List[Any]]) -> Optional[List[Any]]: + if takes_args or v is None: + return v + + raise TypeError(f'{pos_args} positional arguments expected but {pos_args + len(v)} given') + + @validator(self.v_kwargs_name, check_fields=False, allow_reuse=True) + def check_kwargs(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]: + if takes_kwargs or v is None: + return v + + plural = '' if len(v) == 1 else 's' + keys = ', '.join(map(repr, v.keys())) + raise TypeError(f'unexpected keyword argument{plural}: {keys}') + + @validator(V_POSITIONAL_ONLY_NAME, check_fields=False, allow_reuse=True) + def check_positional_only(cls, v: Optional[List[str]]) -> None: + if v is None: + return + + plural = '' if len(v) == 1 else 's' + keys = ', '.join(map(repr, v)) + raise TypeError(f'positional-only argument{plural} passed as keyword argument{plural}: {keys}') + + @validator(V_DUPLICATE_KWARGS, check_fields=False, allow_reuse=True) + def check_duplicate_kwargs(cls, v: Optional[List[str]]) -> None: + if v is None: + return + + plural = '' if len(v) == 1 else 's' + keys = ', '.join(map(repr, v)) + raise TypeError(f'multiple values for argument{plural}: {keys}') + + class Config(CustomConfig): + extra = getattr(CustomConfig, 'extra', Extra.forbid) + + self.model = create_model(to_camel(self.raw_function.__name__), __base__=DecoratorBaseModel, **fields) diff --git a/lib/pydantic/env_settings.py b/lib/pydantic/env_settings.py new file mode 100644 index 00000000..e9988c01 --- /dev/null +++ b/lib/pydantic/env_settings.py @@ -0,0 +1,346 @@ +import os +import warnings +from pathlib import Path +from typing import AbstractSet, Any, Callable, ClassVar, Dict, List, Mapping, Optional, Tuple, Type, Union + +from .config import BaseConfig, Extra +from .fields import ModelField +from .main import BaseModel +from .typing import StrPath, display_as_type, get_origin, is_union +from .utils import deep_update, path_type, sequence_like + +env_file_sentinel = str(object()) + +SettingsSourceCallable = Callable[['BaseSettings'], Dict[str, Any]] +DotenvType = Union[StrPath, List[StrPath], Tuple[StrPath, ...]] + + +class SettingsError(ValueError): + pass + + +class BaseSettings(BaseModel): + """ + Base class for settings, allowing values to be overridden by environment variables. + + This is useful in production for secrets you do not wish to save in code, it plays nicely with docker(-compose), + Heroku and any 12 factor app design. 
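+
+    A minimal illustrative sketch (hypothetical settings class):
+
+        class ApiSettings(BaseSettings):
+            api_key: str = ''
+
+        # with API_KEY=xyz exported in the environment, ApiSettings().api_key == 'xyz';
+        # matching is case-insensitive unless Config.case_sensitive is True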
+ """ + + def __init__( + __pydantic_self__, + _env_file: Optional[DotenvType] = env_file_sentinel, + _env_file_encoding: Optional[str] = None, + _env_nested_delimiter: Optional[str] = None, + _secrets_dir: Optional[StrPath] = None, + **values: Any, + ) -> None: + # Uses something other than `self` the first arg to allow "self" as a settable attribute + super().__init__( + **__pydantic_self__._build_values( + values, + _env_file=_env_file, + _env_file_encoding=_env_file_encoding, + _env_nested_delimiter=_env_nested_delimiter, + _secrets_dir=_secrets_dir, + ) + ) + + def _build_values( + self, + init_kwargs: Dict[str, Any], + _env_file: Optional[DotenvType] = None, + _env_file_encoding: Optional[str] = None, + _env_nested_delimiter: Optional[str] = None, + _secrets_dir: Optional[StrPath] = None, + ) -> Dict[str, Any]: + # Configure built-in sources + init_settings = InitSettingsSource(init_kwargs=init_kwargs) + env_settings = EnvSettingsSource( + env_file=(_env_file if _env_file != env_file_sentinel else self.__config__.env_file), + env_file_encoding=( + _env_file_encoding if _env_file_encoding is not None else self.__config__.env_file_encoding + ), + env_nested_delimiter=( + _env_nested_delimiter if _env_nested_delimiter is not None else self.__config__.env_nested_delimiter + ), + env_prefix_len=len(self.__config__.env_prefix), + ) + file_secret_settings = SecretsSettingsSource(secrets_dir=_secrets_dir or self.__config__.secrets_dir) + # Provide a hook to set built-in sources priority and add / remove sources + sources = self.__config__.customise_sources( + init_settings=init_settings, env_settings=env_settings, file_secret_settings=file_secret_settings + ) + if sources: + return deep_update(*reversed([source(self) for source in sources])) + else: + # no one should mean to do this, but I think returning an empty dict is marginally preferable + # to an informative error and much better than a confusing error + return {} + + class Config(BaseConfig): + env_prefix: str = '' + env_file: Optional[DotenvType] = None + env_file_encoding: Optional[str] = None + env_nested_delimiter: Optional[str] = None + secrets_dir: Optional[StrPath] = None + validate_all: bool = True + extra: Extra = Extra.forbid + arbitrary_types_allowed: bool = True + case_sensitive: bool = False + + @classmethod + def prepare_field(cls, field: ModelField) -> None: + env_names: Union[List[str], AbstractSet[str]] + field_info_from_config = cls.get_field_info(field.name) + + env = field_info_from_config.get('env') or field.field_info.extra.get('env') + if env is None: + if field.has_alias: + warnings.warn( + 'aliases are no longer used by BaseSettings to define which environment variables to read. ' + 'Instead use the "env" field setting. 
' + 'See https://pydantic-docs.helpmanual.io/usage/settings/#environment-variable-names', + FutureWarning, + ) + env_names = {cls.env_prefix + field.name} + elif isinstance(env, str): + env_names = {env} + elif isinstance(env, (set, frozenset)): + env_names = env + elif sequence_like(env): + env_names = list(env) + else: + raise TypeError(f'invalid field env: {env!r} ({display_as_type(env)}); should be string, list or set') + + if not cls.case_sensitive: + env_names = env_names.__class__(n.lower() for n in env_names) + field.field_info.extra['env_names'] = env_names + + @classmethod + def customise_sources( + cls, + init_settings: SettingsSourceCallable, + env_settings: SettingsSourceCallable, + file_secret_settings: SettingsSourceCallable, + ) -> Tuple[SettingsSourceCallable, ...]: + return init_settings, env_settings, file_secret_settings + + @classmethod + def parse_env_var(cls, field_name: str, raw_val: str) -> Any: + return cls.json_loads(raw_val) + + # populated by the metaclass using the Config class defined above, annotated here to help IDEs only + __config__: ClassVar[Type[Config]] + + +class InitSettingsSource: + __slots__ = ('init_kwargs',) + + def __init__(self, init_kwargs: Dict[str, Any]): + self.init_kwargs = init_kwargs + + def __call__(self, settings: BaseSettings) -> Dict[str, Any]: + return self.init_kwargs + + def __repr__(self) -> str: + return f'InitSettingsSource(init_kwargs={self.init_kwargs!r})' + + +class EnvSettingsSource: + __slots__ = ('env_file', 'env_file_encoding', 'env_nested_delimiter', 'env_prefix_len') + + def __init__( + self, + env_file: Optional[DotenvType], + env_file_encoding: Optional[str], + env_nested_delimiter: Optional[str] = None, + env_prefix_len: int = 0, + ): + self.env_file: Optional[DotenvType] = env_file + self.env_file_encoding: Optional[str] = env_file_encoding + self.env_nested_delimiter: Optional[str] = env_nested_delimiter + self.env_prefix_len: int = env_prefix_len + + def __call__(self, settings: BaseSettings) -> Dict[str, Any]: # noqa C901 + """ + Build environment variables suitable for passing to the Model. 
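+
+        Note: values from the real environment win over values read from any
+        dotenv file(s); the two mappings are merged below as
+        `{**dotenv_vars, **env_vars}`.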
+ """ + d: Dict[str, Any] = {} + + if settings.__config__.case_sensitive: + env_vars: Mapping[str, Optional[str]] = os.environ + else: + env_vars = {k.lower(): v for k, v in os.environ.items()} + + dotenv_vars = self._read_env_files(settings.__config__.case_sensitive) + if dotenv_vars: + env_vars = {**dotenv_vars, **env_vars} + + for field in settings.__fields__.values(): + env_val: Optional[str] = None + for env_name in field.field_info.extra['env_names']: + env_val = env_vars.get(env_name) + if env_val is not None: + break + + is_complex, allow_parse_failure = self.field_is_complex(field) + if is_complex: + if env_val is None: + # field is complex but no value found so far, try explode_env_vars + env_val_built = self.explode_env_vars(field, env_vars) + if env_val_built: + d[field.alias] = env_val_built + else: + # field is complex and there's a value, decode that as JSON, then add explode_env_vars + try: + env_val = settings.__config__.parse_env_var(field.name, env_val) + except ValueError as e: + if not allow_parse_failure: + raise SettingsError(f'error parsing env var "{env_name}"') from e + + if isinstance(env_val, dict): + d[field.alias] = deep_update(env_val, self.explode_env_vars(field, env_vars)) + else: + d[field.alias] = env_val + elif env_val is not None: + # simplest case, field is not complex, we only need to add the value if it was found + d[field.alias] = env_val + + return d + + def _read_env_files(self, case_sensitive: bool) -> Dict[str, Optional[str]]: + env_files = self.env_file + if env_files is None: + return {} + + if isinstance(env_files, (str, os.PathLike)): + env_files = [env_files] + + dotenv_vars = {} + for env_file in env_files: + env_path = Path(env_file).expanduser() + if env_path.is_file(): + dotenv_vars.update( + read_env_file(env_path, encoding=self.env_file_encoding, case_sensitive=case_sensitive) + ) + + return dotenv_vars + + def field_is_complex(self, field: ModelField) -> Tuple[bool, bool]: + """ + Find out if a field is complex, and if so whether JSON errors should be ignored + """ + if field.is_complex(): + allow_parse_failure = False + elif is_union(get_origin(field.type_)) and field.sub_fields and any(f.is_complex() for f in field.sub_fields): + allow_parse_failure = True + else: + return False, False + + return True, allow_parse_failure + + def explode_env_vars(self, field: ModelField, env_vars: Mapping[str, Optional[str]]) -> Dict[str, Any]: + """ + Process env_vars and extract the values of keys containing env_nested_delimiter into nested dictionaries. + + This is applied to a single field, hence filtering by env_var prefix. 
+ """ + prefixes = [f'{env_name}{self.env_nested_delimiter}' for env_name in field.field_info.extra['env_names']] + result: Dict[str, Any] = {} + for env_name, env_val in env_vars.items(): + if not any(env_name.startswith(prefix) for prefix in prefixes): + continue + # we remove the prefix before splitting in case the prefix has characters in common with the delimiter + env_name_without_prefix = env_name[self.env_prefix_len :] + _, *keys, last_key = env_name_without_prefix.split(self.env_nested_delimiter) + env_var = result + for key in keys: + env_var = env_var.setdefault(key, {}) + env_var[last_key] = env_val + + return result + + def __repr__(self) -> str: + return ( + f'EnvSettingsSource(env_file={self.env_file!r}, env_file_encoding={self.env_file_encoding!r}, ' + f'env_nested_delimiter={self.env_nested_delimiter!r})' + ) + + +class SecretsSettingsSource: + __slots__ = ('secrets_dir',) + + def __init__(self, secrets_dir: Optional[StrPath]): + self.secrets_dir: Optional[StrPath] = secrets_dir + + def __call__(self, settings: BaseSettings) -> Dict[str, Any]: + """ + Build fields from "secrets" files. + """ + secrets: Dict[str, Optional[str]] = {} + + if self.secrets_dir is None: + return secrets + + secrets_path = Path(self.secrets_dir).expanduser() + + if not secrets_path.exists(): + warnings.warn(f'directory "{secrets_path}" does not exist') + return secrets + + if not secrets_path.is_dir(): + raise SettingsError(f'secrets_dir must reference a directory, not a {path_type(secrets_path)}') + + for field in settings.__fields__.values(): + for env_name in field.field_info.extra['env_names']: + path = find_case_path(secrets_path, env_name, settings.__config__.case_sensitive) + if not path: + # path does not exist, we curently don't return a warning for this + continue + + if path.is_file(): + secret_value = path.read_text().strip() + if field.is_complex(): + try: + secret_value = settings.__config__.parse_env_var(field.name, secret_value) + except ValueError as e: + raise SettingsError(f'error parsing env var "{env_name}"') from e + + secrets[field.alias] = secret_value + else: + warnings.warn( + f'attempted to load secret file "{path}" but found a {path_type(path)} instead.', + stacklevel=4, + ) + return secrets + + def __repr__(self) -> str: + return f'SecretsSettingsSource(secrets_dir={self.secrets_dir!r})' + + +def read_env_file( + file_path: StrPath, *, encoding: str = None, case_sensitive: bool = False +) -> Dict[str, Optional[str]]: + try: + from dotenv import dotenv_values + except ImportError as e: + raise ImportError('python-dotenv is not installed, run `pip install pydantic[dotenv]`') from e + + file_vars: Dict[str, Optional[str]] = dotenv_values(file_path, encoding=encoding or 'utf8') + if not case_sensitive: + return {k.lower(): v for k, v in file_vars.items()} + else: + return file_vars + + +def find_case_path(dir_path: Path, file_name: str, case_sensitive: bool) -> Optional[Path]: + """ + Find a file within path's directory matching filename, optionally ignoring case. 
+ """ + for f in dir_path.iterdir(): + if f.name == file_name: + return f + elif not case_sensitive and f.name.lower() == file_name.lower(): + return f + return None diff --git a/lib/pydantic/error_wrappers.py b/lib/pydantic/error_wrappers.py new file mode 100644 index 00000000..5d3204f4 --- /dev/null +++ b/lib/pydantic/error_wrappers.py @@ -0,0 +1,162 @@ +import json +from typing import TYPE_CHECKING, Any, Dict, Generator, List, Optional, Sequence, Tuple, Type, Union + +from .json import pydantic_encoder +from .utils import Representation + +if TYPE_CHECKING: + from typing_extensions import TypedDict + + from .config import BaseConfig + from .types import ModelOrDc + from .typing import ReprArgs + + Loc = Tuple[Union[int, str], ...] + + class _ErrorDictRequired(TypedDict): + loc: Loc + msg: str + type: str + + class ErrorDict(_ErrorDictRequired, total=False): + ctx: Dict[str, Any] + + +__all__ = 'ErrorWrapper', 'ValidationError' + + +class ErrorWrapper(Representation): + __slots__ = 'exc', '_loc' + + def __init__(self, exc: Exception, loc: Union[str, 'Loc']) -> None: + self.exc = exc + self._loc = loc + + def loc_tuple(self) -> 'Loc': + if isinstance(self._loc, tuple): + return self._loc + else: + return (self._loc,) + + def __repr_args__(self) -> 'ReprArgs': + return [('exc', self.exc), ('loc', self.loc_tuple())] + + +# ErrorList is something like Union[List[Union[List[ErrorWrapper], ErrorWrapper]], ErrorWrapper] +# but recursive, therefore just use: +ErrorList = Union[Sequence[Any], ErrorWrapper] + + +class ValidationError(Representation, ValueError): + __slots__ = 'raw_errors', 'model', '_error_cache' + + def __init__(self, errors: Sequence[ErrorList], model: 'ModelOrDc') -> None: + self.raw_errors = errors + self.model = model + self._error_cache: Optional[List['ErrorDict']] = None + + def errors(self) -> List['ErrorDict']: + if self._error_cache is None: + try: + config = self.model.__config__ # type: ignore + except AttributeError: + config = self.model.__pydantic_model__.__config__ # type: ignore + self._error_cache = list(flatten_errors(self.raw_errors, config)) + return self._error_cache + + def json(self, *, indent: Union[None, int, str] = 2) -> str: + return json.dumps(self.errors(), indent=indent, default=pydantic_encoder) + + def __str__(self) -> str: + errors = self.errors() + no_errors = len(errors) + return ( + f'{no_errors} validation error{"" if no_errors == 1 else "s"} for {self.model.__name__}\n' + f'{display_errors(errors)}' + ) + + def __repr_args__(self) -> 'ReprArgs': + return [('model', self.model.__name__), ('errors', self.errors())] + + +def display_errors(errors: List['ErrorDict']) -> str: + return '\n'.join(f'{_display_error_loc(e)}\n {e["msg"]} ({_display_error_type_and_ctx(e)})' for e in errors) + + +def _display_error_loc(error: 'ErrorDict') -> str: + return ' -> '.join(str(e) for e in error['loc']) + + +def _display_error_type_and_ctx(error: 'ErrorDict') -> str: + t = 'type=' + error['type'] + ctx = error.get('ctx') + if ctx: + return t + ''.join(f'; {k}={v}' for k, v in ctx.items()) + else: + return t + + +def flatten_errors( + errors: Sequence[Any], config: Type['BaseConfig'], loc: Optional['Loc'] = None +) -> Generator['ErrorDict', None, None]: + for error in errors: + if isinstance(error, ErrorWrapper): + + if loc: + error_loc = loc + error.loc_tuple() + else: + error_loc = error.loc_tuple() + + if isinstance(error.exc, ValidationError): + yield from flatten_errors(error.exc.raw_errors, config, error_loc) + else: + yield error_dict(error.exc, config, 
error_loc) + elif isinstance(error, list): + yield from flatten_errors(error, config, loc=loc) + else: + raise RuntimeError(f'Unknown error object: {error}') + + +def error_dict(exc: Exception, config: Type['BaseConfig'], loc: 'Loc') -> 'ErrorDict': + type_ = get_exc_type(exc.__class__) + msg_template = config.error_msg_templates.get(type_) or getattr(exc, 'msg_template', None) + ctx = exc.__dict__ + if msg_template: + msg = msg_template.format(**ctx) + else: + msg = str(exc) + + d: 'ErrorDict' = {'loc': loc, 'msg': msg, 'type': type_} + + if ctx: + d['ctx'] = ctx + + return d + + +_EXC_TYPE_CACHE: Dict[Type[Exception], str] = {} + + +def get_exc_type(cls: Type[Exception]) -> str: + # slightly more efficient than using lru_cache since we don't need to worry about the cache filling up + try: + return _EXC_TYPE_CACHE[cls] + except KeyError: + r = _get_exc_type(cls) + _EXC_TYPE_CACHE[cls] = r + return r + + +def _get_exc_type(cls: Type[Exception]) -> str: + if issubclass(cls, AssertionError): + return 'assertion_error' + + base_name = 'type_error' if issubclass(cls, TypeError) else 'value_error' + if cls in (TypeError, ValueError): + # just TypeError or ValueError, no extra code + return base_name + + # if it's not a TypeError or ValueError, we just take the lowercase of the exception name + # no chaining or snake case logic, use "code" for more complex error types. + code = getattr(cls, 'code', None) or cls.__name__.replace('Error', '').lower() + return base_name + '.' + code diff --git a/lib/pydantic/errors.py b/lib/pydantic/errors.py new file mode 100644 index 00000000..7bdafdd1 --- /dev/null +++ b/lib/pydantic/errors.py @@ -0,0 +1,646 @@ +from decimal import Decimal +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Sequence, Set, Tuple, Type, Union + +from .typing import display_as_type + +if TYPE_CHECKING: + from .typing import DictStrAny + +# explicitly state exports to avoid "from .errors import *" also importing Decimal, Path etc. 
+__all__ = ( + 'PydanticTypeError', + 'PydanticValueError', + 'ConfigError', + 'MissingError', + 'ExtraError', + 'NoneIsNotAllowedError', + 'NoneIsAllowedError', + 'WrongConstantError', + 'NotNoneError', + 'BoolError', + 'BytesError', + 'DictError', + 'EmailError', + 'UrlError', + 'UrlSchemeError', + 'UrlSchemePermittedError', + 'UrlUserInfoError', + 'UrlHostError', + 'UrlHostTldError', + 'UrlPortError', + 'UrlExtraError', + 'EnumError', + 'IntEnumError', + 'EnumMemberError', + 'IntegerError', + 'FloatError', + 'PathError', + 'PathNotExistsError', + 'PathNotAFileError', + 'PathNotADirectoryError', + 'PyObjectError', + 'SequenceError', + 'ListError', + 'SetError', + 'FrozenSetError', + 'TupleError', + 'TupleLengthError', + 'ListMinLengthError', + 'ListMaxLengthError', + 'ListUniqueItemsError', + 'SetMinLengthError', + 'SetMaxLengthError', + 'FrozenSetMinLengthError', + 'FrozenSetMaxLengthError', + 'AnyStrMinLengthError', + 'AnyStrMaxLengthError', + 'StrError', + 'StrRegexError', + 'NumberNotGtError', + 'NumberNotGeError', + 'NumberNotLtError', + 'NumberNotLeError', + 'NumberNotMultipleError', + 'DecimalError', + 'DecimalIsNotFiniteError', + 'DecimalMaxDigitsError', + 'DecimalMaxPlacesError', + 'DecimalWholeDigitsError', + 'DateTimeError', + 'DateError', + 'DateNotInThePastError', + 'DateNotInTheFutureError', + 'TimeError', + 'DurationError', + 'HashableError', + 'UUIDError', + 'UUIDVersionError', + 'ArbitraryTypeError', + 'ClassError', + 'SubclassError', + 'JsonError', + 'JsonTypeError', + 'PatternError', + 'DataclassTypeError', + 'CallableError', + 'IPvAnyAddressError', + 'IPvAnyInterfaceError', + 'IPvAnyNetworkError', + 'IPv4AddressError', + 'IPv6AddressError', + 'IPv4NetworkError', + 'IPv6NetworkError', + 'IPv4InterfaceError', + 'IPv6InterfaceError', + 'ColorError', + 'StrictBoolError', + 'NotDigitError', + 'LuhnValidationError', + 'InvalidLengthForBrand', + 'InvalidByteSize', + 'InvalidByteSizeUnit', + 'MissingDiscriminator', + 'InvalidDiscriminator', +) + + +def cls_kwargs(cls: Type['PydanticErrorMixin'], ctx: 'DictStrAny') -> 'PydanticErrorMixin': + """ + For built-in exceptions like ValueError or TypeError, we need to implement + __reduce__ to override the default behaviour (instead of __getstate__/__setstate__) + By default pickle protocol 2 calls `cls.__new__(cls, *args)`. + Since we only use kwargs, we need a little constructor to change that. 
+ Note: the callable can't be a lambda as pickle looks in the namespace to find it + """ + return cls(**ctx) + + +class PydanticErrorMixin: + code: str + msg_template: str + + def __init__(self, **ctx: Any) -> None: + self.__dict__ = ctx + + def __str__(self) -> str: + return self.msg_template.format(**self.__dict__) + + def __reduce__(self) -> Tuple[Callable[..., 'PydanticErrorMixin'], Tuple[Type['PydanticErrorMixin'], 'DictStrAny']]: + return cls_kwargs, (self.__class__, self.__dict__) + + +class PydanticTypeError(PydanticErrorMixin, TypeError): + pass + + +class PydanticValueError(PydanticErrorMixin, ValueError): + pass + + +class ConfigError(RuntimeError): + pass + + +class MissingError(PydanticValueError): + msg_template = 'field required' + + +class ExtraError(PydanticValueError): + msg_template = 'extra fields not permitted' + + +class NoneIsNotAllowedError(PydanticTypeError): + code = 'none.not_allowed' + msg_template = 'none is not an allowed value' + + +class NoneIsAllowedError(PydanticTypeError): + code = 'none.allowed' + msg_template = 'value is not none' + + +class WrongConstantError(PydanticValueError): + code = 'const' + + def __str__(self) -> str: + permitted = ', '.join(repr(v) for v in self.permitted) # type: ignore + return f'unexpected value; permitted: {permitted}' + + +class NotNoneError(PydanticTypeError): + code = 'not_none' + msg_template = 'value is not None' + + +class BoolError(PydanticTypeError): + msg_template = 'value could not be parsed to a boolean' + + +class BytesError(PydanticTypeError): + msg_template = 'byte type expected' + + +class DictError(PydanticTypeError): + msg_template = 'value is not a valid dict' + + +class EmailError(PydanticValueError): + msg_template = 'value is not a valid email address' + + +class UrlError(PydanticValueError): + code = 'url' + + +class UrlSchemeError(UrlError): + code = 'url.scheme' + msg_template = 'invalid or missing URL scheme' + + +class UrlSchemePermittedError(UrlError): + code = 'url.scheme' + msg_template = 'URL scheme not permitted' + + def __init__(self, allowed_schemes: Set[str]): + super().__init__(allowed_schemes=allowed_schemes) + + +class UrlUserInfoError(UrlError): + code = 'url.userinfo' + msg_template = 'userinfo required in URL but missing' + + +class UrlHostError(UrlError): + code = 'url.host' + msg_template = 'URL host invalid' + + +class UrlHostTldError(UrlError): + code = 'url.host' + msg_template = 'URL host invalid, top level domain required' + + +class UrlPortError(UrlError): + code = 'url.port' + msg_template = 'URL port invalid, port cannot exceed 65535' + + +class UrlExtraError(UrlError): + code = 'url.extra' + msg_template = 'URL invalid, extra characters found after valid URL: {extra!r}' + + +class EnumMemberError(PydanticTypeError): + code = 'enum' + + def __str__(self) -> str: + permitted = ', '.join(repr(v.value) for v in self.enum_values) # type: ignore + return f'value is not a valid enumeration member; permitted: {permitted}' + + +class IntegerError(PydanticTypeError): + msg_template = 'value is not a valid integer' + + +class FloatError(PydanticTypeError): + msg_template = 'value is not a valid float' + + +class PathError(PydanticTypeError): + msg_template = 'value is not a valid path' + + +class _PathValueError(PydanticValueError): + def __init__(self, *, path: Path) -> None: + super().__init__(path=str(path)) + + +class PathNotExistsError(_PathValueError): + code = 'path.not_exists' + msg_template = 'file or directory at path "{path}" does not exist' + + +class 
PathNotAFileError(_PathValueError): + code = 'path.not_a_file' + msg_template = 'path "{path}" does not point to a file' + + +class PathNotADirectoryError(_PathValueError): + code = 'path.not_a_directory' + msg_template = 'path "{path}" does not point to a directory' + + +class PyObjectError(PydanticTypeError): + msg_template = 'ensure this value contains valid import path or valid callable: {error_message}' + + +class SequenceError(PydanticTypeError): + msg_template = 'value is not a valid sequence' + + +class IterableError(PydanticTypeError): + msg_template = 'value is not a valid iterable' + + +class ListError(PydanticTypeError): + msg_template = 'value is not a valid list' + + +class SetError(PydanticTypeError): + msg_template = 'value is not a valid set' + + +class FrozenSetError(PydanticTypeError): + msg_template = 'value is not a valid frozenset' + + +class DequeError(PydanticTypeError): + msg_template = 'value is not a valid deque' + + +class TupleError(PydanticTypeError): + msg_template = 'value is not a valid tuple' + + +class TupleLengthError(PydanticValueError): + code = 'tuple.length' + msg_template = 'wrong tuple length {actual_length}, expected {expected_length}' + + def __init__(self, *, actual_length: int, expected_length: int) -> None: + super().__init__(actual_length=actual_length, expected_length=expected_length) + + +class ListMinLengthError(PydanticValueError): + code = 'list.min_items' + msg_template = 'ensure this value has at least {limit_value} items' + + def __init__(self, *, limit_value: int) -> None: + super().__init__(limit_value=limit_value) + + +class ListMaxLengthError(PydanticValueError): + code = 'list.max_items' + msg_template = 'ensure this value has at most {limit_value} items' + + def __init__(self, *, limit_value: int) -> None: + super().__init__(limit_value=limit_value) + + +class ListUniqueItemsError(PydanticValueError): + code = 'list.unique_items' + msg_template = 'the list has duplicated items' + + +class SetMinLengthError(PydanticValueError): + code = 'set.min_items' + msg_template = 'ensure this value has at least {limit_value} items' + + def __init__(self, *, limit_value: int) -> None: + super().__init__(limit_value=limit_value) + + +class SetMaxLengthError(PydanticValueError): + code = 'set.max_items' + msg_template = 'ensure this value has at most {limit_value} items' + + def __init__(self, *, limit_value: int) -> None: + super().__init__(limit_value=limit_value) + + +class FrozenSetMinLengthError(PydanticValueError): + code = 'frozenset.min_items' + msg_template = 'ensure this value has at least {limit_value} items' + + def __init__(self, *, limit_value: int) -> None: + super().__init__(limit_value=limit_value) + + +class FrozenSetMaxLengthError(PydanticValueError): + code = 'frozenset.max_items' + msg_template = 'ensure this value has at most {limit_value} items' + + def __init__(self, *, limit_value: int) -> None: + super().__init__(limit_value=limit_value) + + +class AnyStrMinLengthError(PydanticValueError): + code = 'any_str.min_length' + msg_template = 'ensure this value has at least {limit_value} characters' + + def __init__(self, *, limit_value: int) -> None: + super().__init__(limit_value=limit_value) + + +class AnyStrMaxLengthError(PydanticValueError): + code = 'any_str.max_length' + msg_template = 'ensure this value has at most {limit_value} characters' + + def __init__(self, *, limit_value: int) -> None: + super().__init__(limit_value=limit_value) + + +class StrError(PydanticTypeError): + msg_template = 'str type expected' + + 
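+# Illustrative sketch (not part of the upstream module): user code defines custom
+# errors in exactly the style of the classes in this file. The names below are
+# hypothetical; `code` becomes the suffix reported by get_exc_type() in
+# error_wrappers.py, and `msg_template` is formatted with the kwargs passed to
+# __init__ (see PydanticErrorMixin above):
+#
+#     class NotABarError(PydanticValueError):
+#         code = 'not_a_bar'
+#         msg_template = 'value is not "bar", got "{wrong_value}"'
+#
+#     assert str(NotABarError(wrong_value='baz')) == 'value is not "bar", got "baz"'
+
+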
+class StrRegexError(PydanticValueError): + code = 'str.regex' + msg_template = 'string does not match regex "{pattern}"' + + def __init__(self, *, pattern: str) -> None: + super().__init__(pattern=pattern) + + +class _NumberBoundError(PydanticValueError): + def __init__(self, *, limit_value: Union[int, float, Decimal]) -> None: + super().__init__(limit_value=limit_value) + + +class NumberNotGtError(_NumberBoundError): + code = 'number.not_gt' + msg_template = 'ensure this value is greater than {limit_value}' + + +class NumberNotGeError(_NumberBoundError): + code = 'number.not_ge' + msg_template = 'ensure this value is greater than or equal to {limit_value}' + + +class NumberNotLtError(_NumberBoundError): + code = 'number.not_lt' + msg_template = 'ensure this value is less than {limit_value}' + + +class NumberNotLeError(_NumberBoundError): + code = 'number.not_le' + msg_template = 'ensure this value is less than or equal to {limit_value}' + + +class NumberNotFiniteError(PydanticValueError): + code = 'number.not_finite_number' + msg_template = 'ensure this value is a finite number' + + +class NumberNotMultipleError(PydanticValueError): + code = 'number.not_multiple' + msg_template = 'ensure this value is a multiple of {multiple_of}' + + def __init__(self, *, multiple_of: Union[int, float, Decimal]) -> None: + super().__init__(multiple_of=multiple_of) + + +class DecimalError(PydanticTypeError): + msg_template = 'value is not a valid decimal' + + +class DecimalIsNotFiniteError(PydanticValueError): + code = 'decimal.not_finite' + msg_template = 'value is not a valid decimal' + + +class DecimalMaxDigitsError(PydanticValueError): + code = 'decimal.max_digits' + msg_template = 'ensure that there are no more than {max_digits} digits in total' + + def __init__(self, *, max_digits: int) -> None: + super().__init__(max_digits=max_digits) + + +class DecimalMaxPlacesError(PydanticValueError): + code = 'decimal.max_places' + msg_template = 'ensure that there are no more than {decimal_places} decimal places' + + def __init__(self, *, decimal_places: int) -> None: + super().__init__(decimal_places=decimal_places) + + +class DecimalWholeDigitsError(PydanticValueError): + code = 'decimal.whole_digits' + msg_template = 'ensure that there are no more than {whole_digits} digits before the decimal point' + + def __init__(self, *, whole_digits: int) -> None: + super().__init__(whole_digits=whole_digits) + + +class DateTimeError(PydanticValueError): + msg_template = 'invalid datetime format' + + +class DateError(PydanticValueError): + msg_template = 'invalid date format' + + +class DateNotInThePastError(PydanticValueError): + code = 'date.not_in_the_past' + msg_template = 'date is not in the past' + + +class DateNotInTheFutureError(PydanticValueError): + code = 'date.not_in_the_future' + msg_template = 'date is not in the future' + + +class TimeError(PydanticValueError): + msg_template = 'invalid time format' + + +class DurationError(PydanticValueError): + msg_template = 'invalid duration format' + + +class HashableError(PydanticTypeError): + msg_template = 'value is not a valid hashable' + + +class UUIDError(PydanticTypeError): + msg_template = 'value is not a valid uuid' + + +class UUIDVersionError(PydanticValueError): + code = 'uuid.version' + msg_template = 'uuid version {required_version} expected' + + def __init__(self, *, required_version: int) -> None: + super().__init__(required_version=required_version) + + +class ArbitraryTypeError(PydanticTypeError): + code = 'arbitrary_type' + msg_template = 
'instance of {expected_arbitrary_type} expected' + + def __init__(self, *, expected_arbitrary_type: Type[Any]) -> None: + super().__init__(expected_arbitrary_type=display_as_type(expected_arbitrary_type)) + + +class ClassError(PydanticTypeError): + code = 'class' + msg_template = 'a class is expected' + + +class SubclassError(PydanticTypeError): + code = 'subclass' + msg_template = 'subclass of {expected_class} expected' + + def __init__(self, *, expected_class: Type[Any]) -> None: + super().__init__(expected_class=display_as_type(expected_class)) + + +class JsonError(PydanticValueError): + msg_template = 'Invalid JSON' + + +class JsonTypeError(PydanticTypeError): + code = 'json' + msg_template = 'JSON object must be str, bytes or bytearray' + + +class PatternError(PydanticValueError): + code = 'regex_pattern' + msg_template = 'Invalid regular expression' + + +class DataclassTypeError(PydanticTypeError): + code = 'dataclass' + msg_template = 'instance of {class_name}, tuple or dict expected' + + +class CallableError(PydanticTypeError): + msg_template = '{value} is not callable' + + +class EnumError(PydanticTypeError): + code = 'enum_instance' + msg_template = '{value} is not a valid Enum instance' + + +class IntEnumError(PydanticTypeError): + code = 'int_enum_instance' + msg_template = '{value} is not a valid IntEnum instance' + + +class IPvAnyAddressError(PydanticValueError): + msg_template = 'value is not a valid IPv4 or IPv6 address' + + +class IPvAnyInterfaceError(PydanticValueError): + msg_template = 'value is not a valid IPv4 or IPv6 interface' + + +class IPvAnyNetworkError(PydanticValueError): + msg_template = 'value is not a valid IPv4 or IPv6 network' + + +class IPv4AddressError(PydanticValueError): + msg_template = 'value is not a valid IPv4 address' + + +class IPv6AddressError(PydanticValueError): + msg_template = 'value is not a valid IPv6 address' + + +class IPv4NetworkError(PydanticValueError): + msg_template = 'value is not a valid IPv4 network' + + +class IPv6NetworkError(PydanticValueError): + msg_template = 'value is not a valid IPv6 network' + + +class IPv4InterfaceError(PydanticValueError): + msg_template = 'value is not a valid IPv4 interface' + + +class IPv6InterfaceError(PydanticValueError): + msg_template = 'value is not a valid IPv6 interface' + + +class ColorError(PydanticValueError): + msg_template = 'value is not a valid color: {reason}' + + +class StrictBoolError(PydanticValueError): + msg_template = 'value is not a valid boolean' + + +class NotDigitError(PydanticValueError): + code = 'payment_card_number.digits' + msg_template = 'card number is not all digits' + + +class LuhnValidationError(PydanticValueError): + code = 'payment_card_number.luhn_check' + msg_template = 'card number is not luhn valid' + + +class InvalidLengthForBrand(PydanticValueError): + code = 'payment_card_number.invalid_length_for_brand' + msg_template = 'Length for a {brand} card must be {required_length}' + + +class InvalidByteSize(PydanticValueError): + msg_template = 'could not parse value and unit from byte string' + + +class InvalidByteSizeUnit(PydanticValueError): + msg_template = 'could not interpret byte unit: {unit}' + + +class MissingDiscriminator(PydanticValueError): + code = 'discriminated_union.missing_discriminator' + msg_template = 'Discriminator {discriminator_key!r} is missing in value' + + +class InvalidDiscriminator(PydanticValueError): + code = 'discriminated_union.invalid_discriminator' + msg_template = ( + 'No match for discriminator {discriminator_key!r} and value 
{discriminator_value!r} ' + '(allowed values: {allowed_values})' + ) + + def __init__(self, *, discriminator_key: str, discriminator_value: Any, allowed_values: Sequence[Any]) -> None: + super().__init__( + discriminator_key=discriminator_key, + discriminator_value=discriminator_value, + allowed_values=', '.join(map(repr, allowed_values)), + ) diff --git a/lib/pydantic/fields.py b/lib/pydantic/fields.py new file mode 100644 index 00000000..cecd3d20 --- /dev/null +++ b/lib/pydantic/fields.py @@ -0,0 +1,1247 @@ +import copy +import re +from collections import Counter as CollectionCounter, defaultdict, deque +from collections.abc import Callable, Hashable as CollectionsHashable, Iterable as CollectionsIterable +from typing import ( + TYPE_CHECKING, + Any, + Counter, + DefaultDict, + Deque, + Dict, + ForwardRef, + FrozenSet, + Generator, + Iterable, + Iterator, + List, + Mapping, + Optional, + Pattern, + Sequence, + Set, + Tuple, + Type, + TypeVar, + Union, +) + +from typing_extensions import Annotated, Final + +from . import errors as errors_ +from .class_validators import Validator, make_generic_validator, prep_validators +from .error_wrappers import ErrorWrapper +from .errors import ConfigError, InvalidDiscriminator, MissingDiscriminator, NoneIsNotAllowedError +from .types import Json, JsonWrapper +from .typing import ( + NoArgAnyCallable, + convert_generics, + display_as_type, + get_args, + get_origin, + is_finalvar, + is_literal_type, + is_new_type, + is_none_type, + is_typeddict, + is_typeddict_special, + is_union, + new_type_supertype, +) +from .utils import ( + PyObjectStr, + Representation, + ValueItems, + get_discriminator_alias_and_values, + get_unique_discriminator_alias, + lenient_isinstance, + lenient_issubclass, + sequence_like, + smart_deepcopy, +) +from .validators import constant_validator, dict_validator, find_validators, validate_json + +Required: Any = Ellipsis + +T = TypeVar('T') + + +class UndefinedType: + def __repr__(self) -> str: + return 'PydanticUndefined' + + def __copy__(self: T) -> T: + return self + + def __reduce__(self) -> str: + return 'Undefined' + + def __deepcopy__(self: T, _: Any) -> T: + return self + + +Undefined = UndefinedType() + +if TYPE_CHECKING: + from .class_validators import ValidatorsList + from .config import BaseConfig + from .error_wrappers import ErrorList + from .types import ModelOrDc + from .typing import AbstractSetIntStr, MappingIntStrAny, ReprArgs + + ValidateReturn = Tuple[Optional[Any], Optional[ErrorList]] + LocStr = Union[Tuple[Union[int, str], ...], str] + BoolUndefined = Union[bool, UndefinedType] + + +class FieldInfo(Representation): + """ + Captures extra information about a field. 
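+
+    e.g. (illustrative): `Field(gt=0)` below returns a FieldInfo whose `gt` is 0,
+    while every other constraint keeps the default recorded in `__field_constraints__`.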
+ """ + + __slots__ = ( + 'default', + 'default_factory', + 'alias', + 'alias_priority', + 'title', + 'description', + 'exclude', + 'include', + 'const', + 'gt', + 'ge', + 'lt', + 'le', + 'multiple_of', + 'allow_inf_nan', + 'max_digits', + 'decimal_places', + 'min_items', + 'max_items', + 'unique_items', + 'min_length', + 'max_length', + 'allow_mutation', + 'repr', + 'regex', + 'discriminator', + 'extra', + ) + + # field constraints with the default value, it's also used in update_from_config below + __field_constraints__ = { + 'min_length': None, + 'max_length': None, + 'regex': None, + 'gt': None, + 'lt': None, + 'ge': None, + 'le': None, + 'multiple_of': None, + 'allow_inf_nan': None, + 'max_digits': None, + 'decimal_places': None, + 'min_items': None, + 'max_items': None, + 'unique_items': None, + 'allow_mutation': True, + } + + def __init__(self, default: Any = Undefined, **kwargs: Any) -> None: + self.default = default + self.default_factory = kwargs.pop('default_factory', None) + self.alias = kwargs.pop('alias', None) + self.alias_priority = kwargs.pop('alias_priority', 2 if self.alias is not None else None) + self.title = kwargs.pop('title', None) + self.description = kwargs.pop('description', None) + self.exclude = kwargs.pop('exclude', None) + self.include = kwargs.pop('include', None) + self.const = kwargs.pop('const', None) + self.gt = kwargs.pop('gt', None) + self.ge = kwargs.pop('ge', None) + self.lt = kwargs.pop('lt', None) + self.le = kwargs.pop('le', None) + self.multiple_of = kwargs.pop('multiple_of', None) + self.allow_inf_nan = kwargs.pop('allow_inf_nan', None) + self.max_digits = kwargs.pop('max_digits', None) + self.decimal_places = kwargs.pop('decimal_places', None) + self.min_items = kwargs.pop('min_items', None) + self.max_items = kwargs.pop('max_items', None) + self.unique_items = kwargs.pop('unique_items', None) + self.min_length = kwargs.pop('min_length', None) + self.max_length = kwargs.pop('max_length', None) + self.allow_mutation = kwargs.pop('allow_mutation', True) + self.regex = kwargs.pop('regex', None) + self.discriminator = kwargs.pop('discriminator', None) + self.repr = kwargs.pop('repr', True) + self.extra = kwargs + + def __repr_args__(self) -> 'ReprArgs': + + field_defaults_to_hide: Dict[str, Any] = { + 'repr': True, + **self.__field_constraints__, + } + + attrs = ((s, getattr(self, s)) for s in self.__slots__) + return [(a, v) for a, v in attrs if v != field_defaults_to_hide.get(a, None)] + + def get_constraints(self) -> Set[str]: + """ + Gets the constraints set on the field by comparing the constraint value with its default value + + :return: the constraints set on field_info + """ + return {attr for attr, default in self.__field_constraints__.items() if getattr(self, attr) != default} + + def update_from_config(self, from_config: Dict[str, Any]) -> None: + """ + Update this FieldInfo based on a dict from get_field_info, only fields which have not been set are dated. + """ + for attr_name, value in from_config.items(): + try: + current_value = getattr(self, attr_name) + except AttributeError: + # attr_name is not an attribute of FieldInfo, it should therefore be added to extra + # (except if extra already has this value!) 
+ self.extra.setdefault(attr_name, value) + else: + if current_value is self.__field_constraints__.get(attr_name, None): + setattr(self, attr_name, value) + elif attr_name == 'exclude': + self.exclude = ValueItems.merge(value, current_value) + elif attr_name == 'include': + self.include = ValueItems.merge(value, current_value, intersect=True) + + def _validate(self) -> None: + if self.default is not Undefined and self.default_factory is not None: + raise ValueError('cannot specify both default and default_factory') + + +def Field( + default: Any = Undefined, + *, + default_factory: Optional[NoArgAnyCallable] = None, + alias: str = None, + title: str = None, + description: str = None, + exclude: Union['AbstractSetIntStr', 'MappingIntStrAny', Any] = None, + include: Union['AbstractSetIntStr', 'MappingIntStrAny', Any] = None, + const: bool = None, + gt: float = None, + ge: float = None, + lt: float = None, + le: float = None, + multiple_of: float = None, + allow_inf_nan: bool = None, + max_digits: int = None, + decimal_places: int = None, + min_items: int = None, + max_items: int = None, + unique_items: bool = None, + min_length: int = None, + max_length: int = None, + allow_mutation: bool = True, + regex: str = None, + discriminator: str = None, + repr: bool = True, + **extra: Any, +) -> Any: + """ + Used to provide extra information about a field, either for the model schema or complex validation. Some arguments + apply only to number fields (``int``, ``float``, ``Decimal``) and some apply only to ``str``. + + :param default: since this is replacing the field’s default, its first argument is used + to set the default, use ellipsis (``...``) to indicate the field is required + :param default_factory: callable that will be called when a default value is needed for this field + If both `default` and `default_factory` are set, an error is raised. + :param alias: the public name of the field + :param title: can be any string, used in the schema + :param description: can be any string, used in the schema + :param exclude: exclude this field while dumping. + Takes same values as the ``include`` and ``exclude`` arguments on the ``.dict`` method. + :param include: include this field while dumping. + Takes same values as the ``include`` and ``exclude`` arguments on the ``.dict`` method. + :param const: this field is required and *must* take its default value + :param gt: only applies to numbers, requires the field to be "greater than". The schema + will have an ``exclusiveMinimum`` validation keyword + :param ge: only applies to numbers, requires the field to be "greater than or equal to". The + schema will have a ``minimum`` validation keyword + :param lt: only applies to numbers, requires the field to be "less than". The schema + will have an ``exclusiveMaximum`` validation keyword + :param le: only applies to numbers, requires the field to be "less than or equal to". The + schema will have a ``maximum`` validation keyword + :param multiple_of: only applies to numbers, requires the field to be "a multiple of". The + schema will have a ``multipleOf`` validation keyword + :param allow_inf_nan: only applies to numbers, allows the field to be NaN or infinity (+inf or -inf), + which is a valid Python float. Default True, set to False for compatibility with JSON. + :param max_digits: only applies to Decimals, requires the field to have a maximum number + of digits within the decimal. It does not include a zero before the decimal point or trailing decimal zeroes.
+ :param decimal_places: only applies to Decimals, requires the field to have at most the given number of + decimal places. It does not include trailing decimal zeroes. + :param min_items: only applies to lists, requires the field to have a minimum number of + elements. The schema will have a ``minItems`` validation keyword + :param max_items: only applies to lists, requires the field to have a maximum number of + elements. The schema will have a ``maxItems`` validation keyword + :param unique_items: only applies to lists, requires the field not to have duplicated + elements. The schema will have a ``uniqueItems`` validation keyword + :param min_length: only applies to strings, requires the field to have a minimum length. The + schema will have a ``minLength`` validation keyword + :param max_length: only applies to strings, requires the field to have a maximum length. The + schema will have a ``maxLength`` validation keyword + :param allow_mutation: a boolean which defaults to True. When False, a TypeError is raised if the field is + assigned on an instance. The BaseModel Config must set validate_assignment to True + :param regex: only applies to strings, requires the field to match against a regular expression + pattern string. The schema will have a ``pattern`` validation keyword + :param discriminator: only useful with a (discriminated a.k.a. tagged) `Union` of sub models with a common field. + The `discriminator` is the name of this common field to shorten validation and improve generated schema + :param repr: show this field in the representation + :param **extra: any additional keyword arguments will be added as is to the schema + """ + field_info = FieldInfo( + default, + default_factory=default_factory, + alias=alias, + title=title, + description=description, + exclude=exclude, + include=include, + const=const, + gt=gt, + ge=ge, + lt=lt, + le=le, + multiple_of=multiple_of, + allow_inf_nan=allow_inf_nan, + max_digits=max_digits, + decimal_places=decimal_places, + min_items=min_items, + max_items=max_items, + unique_items=unique_items, + min_length=min_length, + max_length=max_length, + allow_mutation=allow_mutation, + regex=regex, + discriminator=discriminator, + repr=repr, + **extra, + ) + field_info._validate() + return field_info + + +# used to be an enum but changed to ints for a small performance improvement (less access overhead) +SHAPE_SINGLETON = 1 +SHAPE_LIST = 2 +SHAPE_SET = 3 +SHAPE_MAPPING = 4 +SHAPE_TUPLE = 5 +SHAPE_TUPLE_ELLIPSIS = 6 +SHAPE_SEQUENCE = 7 +SHAPE_FROZENSET = 8 +SHAPE_ITERABLE = 9 +SHAPE_GENERIC = 10 +SHAPE_DEQUE = 11 +SHAPE_DICT = 12 +SHAPE_DEFAULTDICT = 13 +SHAPE_COUNTER = 14 +SHAPE_NAME_LOOKUP = { + SHAPE_LIST: 'List[{}]', + SHAPE_SET: 'Set[{}]', + SHAPE_TUPLE_ELLIPSIS: 'Tuple[{}, ...]', + SHAPE_SEQUENCE: 'Sequence[{}]', + SHAPE_FROZENSET: 'FrozenSet[{}]', + SHAPE_ITERABLE: 'Iterable[{}]', + SHAPE_DEQUE: 'Deque[{}]', + SHAPE_DICT: 'Dict[{}]', + SHAPE_DEFAULTDICT: 'DefaultDict[{}]', + SHAPE_COUNTER: 'Counter[{}]', +} + +MAPPING_LIKE_SHAPES: Set[int] = {SHAPE_DEFAULTDICT, SHAPE_DICT, SHAPE_MAPPING, SHAPE_COUNTER} + + +class ModelField(Representation): + __slots__ = ( + 'type_', + 'outer_type_', + 'annotation', + 'sub_fields', + 'sub_fields_mapping', + 'key_field', + 'validators', + 'pre_validators', + 'post_validators', + 'default', + 'default_factory', + 'required', + 'final', + 'model_config', + 'name', + 'alias', + 'has_alias', + 'field_info', + 'discriminator_key', + 'discriminator_alias', + 'validate_always', + 'allow_none', + 'shape', +
'class_validators', + 'parse_json', + ) + + def __init__( + self, + *, + name: str, + type_: Type[Any], + class_validators: Optional[Dict[str, Validator]], + model_config: Type['BaseConfig'], + default: Any = None, + default_factory: Optional[NoArgAnyCallable] = None, + required: 'BoolUndefined' = Undefined, + final: bool = False, + alias: str = None, + field_info: Optional[FieldInfo] = None, + ) -> None: + + self.name: str = name + self.has_alias: bool = alias is not None + self.alias: str = alias if alias is not None else name + self.annotation = type_ + self.type_: Any = convert_generics(type_) + self.outer_type_: Any = type_ + self.class_validators = class_validators or {} + self.default: Any = default + self.default_factory: Optional[NoArgAnyCallable] = default_factory + self.required: 'BoolUndefined' = required + self.final: bool = final + self.model_config = model_config + self.field_info: FieldInfo = field_info or FieldInfo(default) + self.discriminator_key: Optional[str] = self.field_info.discriminator + self.discriminator_alias: Optional[str] = self.discriminator_key + + self.allow_none: bool = False + self.validate_always: bool = False + self.sub_fields: Optional[List[ModelField]] = None + self.sub_fields_mapping: Optional[Dict[str, 'ModelField']] = None # used for discriminated union + self.key_field: Optional[ModelField] = None + self.validators: 'ValidatorsList' = [] + self.pre_validators: Optional['ValidatorsList'] = None + self.post_validators: Optional['ValidatorsList'] = None + self.parse_json: bool = False + self.shape: int = SHAPE_SINGLETON + self.model_config.prepare_field(self) + self.prepare() + + def get_default(self) -> Any: + return smart_deepcopy(self.default) if self.default_factory is None else self.default_factory() + + @staticmethod + def _get_field_info( + field_name: str, annotation: Any, value: Any, config: Type['BaseConfig'] + ) -> Tuple[FieldInfo, Any]: + """ + Get a FieldInfo from a root typing.Annotated annotation, value, or config default. + + The FieldInfo may be set in typing.Annotated or the value, but not both. If neither contains + a FieldInfo, a new one will be created using the config. + + :param field_name: name of the field for use in error messages + :param annotation: a type hint such as `str` or `Annotated[str, Field(..., min_length=5)]` + :param value: the field's assigned value + :param config: the model's config object + :return: the FieldInfo contained in the `annotation`, the value, or a new one from the config.
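+
+        e.g. (illustrative): for `annotation=Annotated[int, Field(gt=0)]` and `value=5`,
+        the returned FieldInfo carries `gt=0` and ends up with default `5`; a second
+        `Field` inside the same `Annotated` would raise ValueError.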
+ """ + field_info_from_config = config.get_field_info(field_name) + + field_info = None + if get_origin(annotation) is Annotated: + field_infos = [arg for arg in get_args(annotation)[1:] if isinstance(arg, FieldInfo)] + if len(field_infos) > 1: + raise ValueError(f'cannot specify multiple `Annotated` `Field`s for {field_name!r}') + field_info = next(iter(field_infos), None) + if field_info is not None: + field_info = copy.copy(field_info) + field_info.update_from_config(field_info_from_config) + if field_info.default not in (Undefined, Required): + raise ValueError(f'`Field` default cannot be set in `Annotated` for {field_name!r}') + if value is not Undefined and value is not Required: + # check also `Required` because of `validate_arguments` that sets `...` as default value + field_info.default = value + + if isinstance(value, FieldInfo): + if field_info is not None: + raise ValueError(f'cannot specify `Annotated` and value `Field`s together for {field_name!r}') + field_info = value + field_info.update_from_config(field_info_from_config) + elif field_info is None: + field_info = FieldInfo(value, **field_info_from_config) + value = None if field_info.default_factory is not None else field_info.default + field_info._validate() + return field_info, value + + @classmethod + def infer( + cls, + *, + name: str, + value: Any, + annotation: Any, + class_validators: Optional[Dict[str, Validator]], + config: Type['BaseConfig'], + ) -> 'ModelField': + from .schema import get_annotation_from_field_info + + field_info, value = cls._get_field_info(name, annotation, value, config) + required: 'BoolUndefined' = Undefined + if value is Required: + required = True + value = None + elif value is not Undefined: + required = False + annotation = get_annotation_from_field_info(annotation, field_info, name, config.validate_assignment) + + return cls( + name=name, + type_=annotation, + alias=field_info.alias, + class_validators=class_validators, + default=value, + default_factory=field_info.default_factory, + required=required, + model_config=config, + field_info=field_info, + ) + + def set_config(self, config: Type['BaseConfig']) -> None: + self.model_config = config + info_from_config = config.get_field_info(self.name) + config.prepare_field(self) + new_alias = info_from_config.get('alias') + new_alias_priority = info_from_config.get('alias_priority') or 0 + if new_alias and new_alias_priority >= (self.field_info.alias_priority or 0): + self.field_info.alias = new_alias + self.field_info.alias_priority = new_alias_priority + self.alias = new_alias + new_exclude = info_from_config.get('exclude') + if new_exclude is not None: + self.field_info.exclude = ValueItems.merge(self.field_info.exclude, new_exclude) + new_include = info_from_config.get('include') + if new_include is not None: + self.field_info.include = ValueItems.merge(self.field_info.include, new_include, intersect=True) + + @property + def alt_alias(self) -> bool: + return self.name != self.alias + + def prepare(self) -> None: + """ + Prepare the field but inspecting self.default, self.type_ etc. + + Note: this method is **not** idempotent (because _type_analysis is not idempotent), + e.g. calling it it multiple times may modify the field and configure it incorrectly. 
+ """ + self._set_default_and_type() + if self.type_.__class__ is ForwardRef or self.type_.__class__ is DeferredType: + # self.type_ is currently a ForwardRef and there's nothing we can do now, + # user will need to call model.update_forward_refs() + return + + self._type_analysis() + if self.required is Undefined: + self.required = True + if self.default is Undefined and self.default_factory is None: + self.default = None + self.populate_validators() + + def _set_default_and_type(self) -> None: + """ + Set the default value, infer the type if needed and check if `None` value is valid. + """ + if self.default_factory is not None: + if self.type_ is Undefined: + raise errors_.ConfigError( + f'you need to set the type of field {self.name!r} when using `default_factory`' + ) + return + + default_value = self.get_default() + + if default_value is not None and self.type_ is Undefined: + self.type_ = default_value.__class__ + self.outer_type_ = self.type_ + self.annotation = self.type_ + + if self.type_ is Undefined: + raise errors_.ConfigError(f'unable to infer type for attribute "{self.name}"') + + if self.required is False and default_value is None: + self.allow_none = True + + def _type_analysis(self) -> None: # noqa: C901 (ignore complexity) + # typing interface is horrible, we have to do some ugly checks + if lenient_issubclass(self.type_, JsonWrapper): + self.type_ = self.type_.inner_type + self.parse_json = True + elif lenient_issubclass(self.type_, Json): + self.type_ = Any + self.parse_json = True + elif isinstance(self.type_, TypeVar): + if self.type_.__bound__: + self.type_ = self.type_.__bound__ + elif self.type_.__constraints__: + self.type_ = Union[self.type_.__constraints__] + else: + self.type_ = Any + elif is_new_type(self.type_): + self.type_ = new_type_supertype(self.type_) + + if self.type_ is Any or self.type_ is object: + if self.required is Undefined: + self.required = False + self.allow_none = True + return + elif self.type_ is Pattern or self.type_ is re.Pattern: + # python 3.7 only, Pattern is a typing object but without sub fields + return + elif is_literal_type(self.type_): + return + elif is_typeddict(self.type_): + return + + if is_finalvar(self.type_): + self.final = True + + if self.type_ is Final: + self.type_ = Any + else: + self.type_ = get_args(self.type_)[0] + + self._type_analysis() + return + + origin = get_origin(self.type_) + + if origin is Annotated or is_typeddict_special(origin): + self.type_ = get_args(self.type_)[0] + self._type_analysis() + return + + if self.discriminator_key is not None and not is_union(origin): + raise TypeError('`discriminator` can only be used with `Union` type with more than one variant') + + # add extra check for `collections.abc.Hashable` for python 3.10+ where origin is not `None` + if origin is None or origin is CollectionsHashable: + # field is not "typing" object eg. Union, Dict, List etc. + # allow None for virtual superclasses of NoneType, e.g. 
Hashable + if isinstance(self.type_, type) and isinstance(None, self.type_): + self.allow_none = True + return + elif origin is Callable: + return + elif is_union(origin): + types_ = [] + for type_ in get_args(self.type_): + if is_none_type(type_) or type_ is Any or type_ is object: + if self.required is Undefined: + self.required = False + self.allow_none = True + if is_none_type(type_): + continue + types_.append(type_) + + if len(types_) == 1: + # Optional[] + self.type_ = types_[0] + # this is the one case where the "outer type" isn't just the original type + self.outer_type_ = self.type_ + # re-run to correctly interpret the new self.type_ + self._type_analysis() + else: + self.sub_fields = [self._create_sub_type(t, f'{self.name}_{display_as_type(t)}') for t in types_] + + if self.discriminator_key is not None: + self.prepare_discriminated_union_sub_fields() + return + elif issubclass(origin, Tuple): # type: ignore + # origin == Tuple without item type + args = get_args(self.type_) + if not args: # plain tuple + self.type_ = Any + self.shape = SHAPE_TUPLE_ELLIPSIS + elif len(args) == 2 and args[1] is Ellipsis: # e.g. Tuple[int, ...] + self.type_ = args[0] + self.shape = SHAPE_TUPLE_ELLIPSIS + self.sub_fields = [self._create_sub_type(args[0], f'{self.name}_0')] + elif args == ((),): # Tuple[()] means empty tuple + self.shape = SHAPE_TUPLE + self.type_ = Any + self.sub_fields = [] + else: + self.shape = SHAPE_TUPLE + self.sub_fields = [self._create_sub_type(t, f'{self.name}_{i}') for i, t in enumerate(args)] + return + elif issubclass(origin, List): + # Create self validators + get_validators = getattr(self.type_, '__get_validators__', None) + if get_validators: + self.class_validators.update( + {f'list_{i}': Validator(validator, pre=True) for i, validator in enumerate(get_validators())} + ) + + self.type_ = get_args(self.type_)[0] + self.shape = SHAPE_LIST + elif issubclass(origin, Set): + # Create self validators + get_validators = getattr(self.type_, '__get_validators__', None) + if get_validators: + self.class_validators.update( + {f'set_{i}': Validator(validator, pre=True) for i, validator in enumerate(get_validators())} + ) + + self.type_ = get_args(self.type_)[0] + self.shape = SHAPE_SET + elif issubclass(origin, FrozenSet): + # Create self validators + get_validators = getattr(self.type_, '__get_validators__', None) + if get_validators: + self.class_validators.update( + {f'frozenset_{i}': Validator(validator, pre=True) for i, validator in enumerate(get_validators())} + ) + + self.type_ = get_args(self.type_)[0] + self.shape = SHAPE_FROZENSET + elif issubclass(origin, Deque): + self.type_ = get_args(self.type_)[0] + self.shape = SHAPE_DEQUE + elif issubclass(origin, Sequence): + self.type_ = get_args(self.type_)[0] + self.shape = SHAPE_SEQUENCE + # priority to most common mapping: dict + elif origin is dict or origin is Dict: + self.key_field = self._create_sub_type(get_args(self.type_)[0], 'key_' + self.name, for_keys=True) + self.type_ = get_args(self.type_)[1] + self.shape = SHAPE_DICT + elif issubclass(origin, DefaultDict): + self.key_field = self._create_sub_type(get_args(self.type_)[0], 'key_' + self.name, for_keys=True) + self.type_ = get_args(self.type_)[1] + self.shape = SHAPE_DEFAULTDICT + elif issubclass(origin, Counter): + self.key_field = self._create_sub_type(get_args(self.type_)[0], 'key_' + self.name, for_keys=True) + self.type_ = int + self.shape = SHAPE_COUNTER + elif issubclass(origin, Mapping): + self.key_field = self._create_sub_type(get_args(self.type_)[0], 
'key_' + self.name, for_keys=True) + self.type_ = get_args(self.type_)[1] + self.shape = SHAPE_MAPPING + # Equality check as almost everything inherits from Iterable, including str + # check for Iterable and CollectionsIterable, as it could receive one even when declared with the other + elif origin in {Iterable, CollectionsIterable}: + self.type_ = get_args(self.type_)[0] + self.shape = SHAPE_ITERABLE + self.sub_fields = [self._create_sub_type(self.type_, f'{self.name}_type')] + elif issubclass(origin, Type): # type: ignore + return + elif hasattr(origin, '__get_validators__') or self.model_config.arbitrary_types_allowed: + # Is a Pydantic-compatible generic that handles itself + # or we have arbitrary_types_allowed = True + self.shape = SHAPE_GENERIC + self.sub_fields = [self._create_sub_type(t, f'{self.name}_{i}') for i, t in enumerate(get_args(self.type_))] + self.type_ = origin + return + else: + raise TypeError(f'Fields of type "{origin}" are not supported.') + + # type_ has been refined eg. as the type of a List and sub_fields needs to be populated + self.sub_fields = [self._create_sub_type(self.type_, '_' + self.name)] + + def prepare_discriminated_union_sub_fields(self) -> None: + """ + Prepare the mapping from discriminator value to sub field and update `sub_fields` + Note that this process can be aborted if a `ForwardRef` is encountered + """ + assert self.discriminator_key is not None + + if self.type_.__class__ is DeferredType: + return + + assert self.sub_fields is not None + sub_fields_mapping: Dict[str, 'ModelField'] = {} + all_aliases: Set[str] = set() + + for sub_field in self.sub_fields: + t = sub_field.type_ + if t.__class__ is ForwardRef: + # Stopping everything...will need to call `update_forward_refs` + return + + alias, discriminator_values = get_discriminator_alias_and_values(t, self.discriminator_key) + all_aliases.add(alias) + for discriminator_value in discriminator_values: + sub_fields_mapping[discriminator_value] = sub_field + + self.sub_fields_mapping = sub_fields_mapping + self.discriminator_alias = get_unique_discriminator_alias(all_aliases, self.discriminator_key) + + def _create_sub_type(self, type_: Type[Any], name: str, *, for_keys: bool = False) -> 'ModelField': + if for_keys: + class_validators = None + else: + # validators for sub items should not have `each_item` as we want to check only the first sublevel + class_validators = { + k: Validator( + func=v.func, + pre=v.pre, + each_item=False, + always=v.always, + check_fields=v.check_fields, + skip_on_failure=v.skip_on_failure, + ) + for k, v in self.class_validators.items() + if v.each_item + } + + field_info, _ = self._get_field_info(name, type_, None, self.model_config) + + return self.__class__( + type_=type_, + name=name, + class_validators=class_validators, + model_config=self.model_config, + field_info=field_info, + ) + + def populate_validators(self) -> None: + """ + Prepare self.pre_validators, self.validators, and self.post_validators based on self.type_'s __get_validators__ + and class validators. This method should be idempotent, e.g. it should be safe to call multiple times + without mis-configuring the field.
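+
+        e.g. (illustrative): for an `int` field with one `pre=True, each_item=True`
+        class validator, `self.validators` runs that validator first, then the type
+        validators returned by find_validators(), then any non-pre each_item validators.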
+ """ + self.validate_always = getattr(self.type_, 'validate_always', False) or any( + v.always for v in self.class_validators.values() + ) + + class_validators_ = self.class_validators.values() + if not self.sub_fields or self.shape == SHAPE_GENERIC: + get_validators = getattr(self.type_, '__get_validators__', None) + v_funcs = ( + *[v.func for v in class_validators_ if v.each_item and v.pre], + *(get_validators() if get_validators else list(find_validators(self.type_, self.model_config))), + *[v.func for v in class_validators_ if v.each_item and not v.pre], + ) + self.validators = prep_validators(v_funcs) + + self.pre_validators = [] + self.post_validators = [] + + if self.field_info and self.field_info.const: + self.post_validators.append(make_generic_validator(constant_validator)) + + if class_validators_: + self.pre_validators += prep_validators(v.func for v in class_validators_ if not v.each_item and v.pre) + self.post_validators += prep_validators(v.func for v in class_validators_ if not v.each_item and not v.pre) + + if self.parse_json: + self.pre_validators.append(make_generic_validator(validate_json)) + + self.pre_validators = self.pre_validators or None + self.post_validators = self.post_validators or None + + def validate( + self, v: Any, values: Dict[str, Any], *, loc: 'LocStr', cls: Optional['ModelOrDc'] = None + ) -> 'ValidateReturn': + + assert self.type_.__class__ is not DeferredType + + if self.type_.__class__ is ForwardRef: + assert cls is not None + raise ConfigError( + f'field "{self.name}" not yet prepared so type is still a ForwardRef, ' + f'you might need to call {cls.__name__}.update_forward_refs().' + ) + + errors: Optional['ErrorList'] + if self.pre_validators: + v, errors = self._apply_validators(v, values, loc, cls, self.pre_validators) + if errors: + return v, errors + + if v is None: + if is_none_type(self.type_): + # keep validating + pass + elif self.allow_none: + if self.post_validators: + return self._apply_validators(v, values, loc, cls, self.post_validators) + else: + return None, None + else: + return v, ErrorWrapper(NoneIsNotAllowedError(), loc) + + if self.shape == SHAPE_SINGLETON: + v, errors = self._validate_singleton(v, values, loc, cls) + elif self.shape in MAPPING_LIKE_SHAPES: + v, errors = self._validate_mapping_like(v, values, loc, cls) + elif self.shape == SHAPE_TUPLE: + v, errors = self._validate_tuple(v, values, loc, cls) + elif self.shape == SHAPE_ITERABLE: + v, errors = self._validate_iterable(v, values, loc, cls) + elif self.shape == SHAPE_GENERIC: + v, errors = self._apply_validators(v, values, loc, cls, self.validators) + else: + # sequence, list, set, generator, tuple with ellipsis, frozen set + v, errors = self._validate_sequence_like(v, values, loc, cls) + + if not errors and self.post_validators: + v, errors = self._apply_validators(v, values, loc, cls, self.post_validators) + return v, errors + + def _validate_sequence_like( # noqa: C901 (ignore complexity) + self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc'] + ) -> 'ValidateReturn': + """ + Validate sequence-like containers: lists, tuples, sets and generators + Note that large if-else blocks are necessary to enable Cython + optimization, which is why we disable the complexity check above. 
+ """ + if not sequence_like(v): + e: errors_.PydanticTypeError + if self.shape == SHAPE_LIST: + e = errors_.ListError() + elif self.shape in (SHAPE_TUPLE, SHAPE_TUPLE_ELLIPSIS): + e = errors_.TupleError() + elif self.shape == SHAPE_SET: + e = errors_.SetError() + elif self.shape == SHAPE_FROZENSET: + e = errors_.FrozenSetError() + else: + e = errors_.SequenceError() + return v, ErrorWrapper(e, loc) + + loc = loc if isinstance(loc, tuple) else (loc,) + result = [] + errors: List[ErrorList] = [] + for i, v_ in enumerate(v): + v_loc = *loc, i + r, ee = self._validate_singleton(v_, values, v_loc, cls) + if ee: + errors.append(ee) + else: + result.append(r) + + if errors: + return v, errors + + converted: Union[List[Any], Set[Any], FrozenSet[Any], Tuple[Any, ...], Iterator[Any], Deque[Any]] = result + + if self.shape == SHAPE_SET: + converted = set(result) + elif self.shape == SHAPE_FROZENSET: + converted = frozenset(result) + elif self.shape == SHAPE_TUPLE_ELLIPSIS: + converted = tuple(result) + elif self.shape == SHAPE_DEQUE: + converted = deque(result) + elif self.shape == SHAPE_SEQUENCE: + if isinstance(v, tuple): + converted = tuple(result) + elif isinstance(v, set): + converted = set(result) + elif isinstance(v, Generator): + converted = iter(result) + elif isinstance(v, deque): + converted = deque(result) + return converted, None + + def _validate_iterable( + self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc'] + ) -> 'ValidateReturn': + """ + Validate Iterables. + + This intentionally doesn't validate values to allow infinite generators. + """ + + try: + iterable = iter(v) + except TypeError: + return v, ErrorWrapper(errors_.IterableError(), loc) + return iterable, None + + def _validate_tuple( + self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc'] + ) -> 'ValidateReturn': + e: Optional[Exception] = None + if not sequence_like(v): + e = errors_.TupleError() + else: + actual_length, expected_length = len(v), len(self.sub_fields) # type: ignore + if actual_length != expected_length: + e = errors_.TupleLengthError(actual_length=actual_length, expected_length=expected_length) + + if e: + return v, ErrorWrapper(e, loc) + + loc = loc if isinstance(loc, tuple) else (loc,) + result = [] + errors: List[ErrorList] = [] + for i, (v_, field) in enumerate(zip(v, self.sub_fields)): # type: ignore + v_loc = *loc, i + r, ee = field.validate(v_, values, loc=v_loc, cls=cls) + if ee: + errors.append(ee) + else: + result.append(r) + + if errors: + return v, errors + else: + return tuple(result), None + + def _validate_mapping_like( + self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc'] + ) -> 'ValidateReturn': + try: + v_iter = dict_validator(v) + except TypeError as exc: + return v, ErrorWrapper(exc, loc) + + loc = loc if isinstance(loc, tuple) else (loc,) + result, errors = {}, [] + for k, v_ in v_iter.items(): + v_loc = *loc, '__key__' + key_result, key_errors = self.key_field.validate(k, values, loc=v_loc, cls=cls) # type: ignore + if key_errors: + errors.append(key_errors) + continue + + v_loc = *loc, k + value_result, value_errors = self._validate_singleton(v_, values, v_loc, cls) + if value_errors: + errors.append(value_errors) + continue + + result[key_result] = value_result + if errors: + return v, errors + elif self.shape == SHAPE_DICT: + return result, None + elif self.shape == SHAPE_DEFAULTDICT: + return defaultdict(self.type_, result), None + elif self.shape == SHAPE_COUNTER: + return CollectionCounter(result), 
None + else: + return self._get_mapping_value(v, result), None + + def _get_mapping_value(self, original: T, converted: Dict[Any, Any]) -> Union[T, Dict[Any, Any]]: + """ + When type is `Mapping[KT, KV]` (or another unsupported mapping), we try to avoid + coercing to `dict` unwillingly. + """ + original_cls = original.__class__ + + if original_cls == dict or original_cls == Dict: + return converted + elif original_cls in {defaultdict, DefaultDict}: + return defaultdict(self.type_, converted) + else: + try: + # Counter, OrderedDict, UserDict, ... + return original_cls(converted) # type: ignore + except TypeError: + raise RuntimeError(f'Could not convert dictionary to {original_cls.__name__!r}') from None + + def _validate_singleton( + self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc'] + ) -> 'ValidateReturn': + if self.sub_fields: + if self.discriminator_key is not None: + return self._validate_discriminated_union(v, values, loc, cls) + + errors = [] + + if self.model_config.smart_union and is_union(get_origin(self.type_)): + # 1st pass: check if the value is an exact instance of one of the Union types + # (e.g. to avoid coercing a bool into an int) + for field in self.sub_fields: + if v.__class__ is field.outer_type_: + return v, None + + # 2nd pass: check if the value is an instance of any subclass of the Union types + for field in self.sub_fields: + # This whole logic will be improved later on to support more complex `isinstance` checks + # It will probably be done once a strict mode is added and be something like: + # ``` + # value, error = field.validate(v, values, strict=True) + # if error is None: + # return value, None + # ``` + try: + if isinstance(v, field.outer_type_): + return v, None + except TypeError: + # compound type + if lenient_isinstance(v, get_origin(field.outer_type_)): + value, error = field.validate(v, values, loc=loc, cls=cls) + if not error: + return value, None + + # 1st pass by default or 3rd pass with `smart_union` enabled: + # check if the value can be coerced into one of the Union types + for field in self.sub_fields: + value, error = field.validate(v, values, loc=loc, cls=cls) + if error: + errors.append(error) + else: + return value, None + return v, errors + else: + return self._apply_validators(v, values, loc, cls, self.validators) + + def _validate_discriminated_union( + self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc'] + ) -> 'ValidateReturn': + assert self.discriminator_key is not None + assert self.discriminator_alias is not None + + try: + discriminator_value = v[self.discriminator_alias] + except KeyError: + return v, ErrorWrapper(MissingDiscriminator(discriminator_key=self.discriminator_key), loc) + except TypeError: + try: + # BaseModel or dataclass + discriminator_value = getattr(v, self.discriminator_key) + except (AttributeError, TypeError): + return v, ErrorWrapper(MissingDiscriminator(discriminator_key=self.discriminator_key), loc) + + try: + sub_field = self.sub_fields_mapping[discriminator_value] # type: ignore[index] + except TypeError: + assert cls is not None + raise ConfigError( + f'field "{self.name}" not yet prepared so type is still a ForwardRef, ' + f'you might need to call {cls.__name__}.update_forward_refs().' 
) + except KeyError: + assert self.sub_fields_mapping is not None + return v, ErrorWrapper( + InvalidDiscriminator( + discriminator_key=self.discriminator_key, + discriminator_value=discriminator_value, + allowed_values=list(self.sub_fields_mapping), + ), + loc, + ) + else: + if not isinstance(loc, tuple): + loc = (loc,) + return sub_field.validate(v, values, loc=(*loc, display_as_type(sub_field.type_)), cls=cls) + + def _apply_validators( + self, v: Any, values: Dict[str, Any], loc: 'LocStr', cls: Optional['ModelOrDc'], validators: 'ValidatorsList' + ) -> 'ValidateReturn': + for validator in validators: + try: + v = validator(cls, v, values, self, self.model_config) + except (ValueError, TypeError, AssertionError) as exc: + return v, ErrorWrapper(exc, loc) + return v, None + + def is_complex(self) -> bool: + """ + Whether the field is "complex" eg. env variables should be parsed as JSON. + """ + from .main import BaseModel + + return ( + self.shape != SHAPE_SINGLETON + or hasattr(self.type_, '__pydantic_model__') + or lenient_issubclass(self.type_, (BaseModel, list, set, frozenset, dict)) + ) + + def _type_display(self) -> PyObjectStr: + t = display_as_type(self.type_) + + if self.shape in MAPPING_LIKE_SHAPES: + t = f'Mapping[{display_as_type(self.key_field.type_)}, {t}]' # type: ignore + elif self.shape == SHAPE_TUPLE: + t = 'Tuple[{}]'.format(', '.join(display_as_type(f.type_) for f in self.sub_fields)) # type: ignore + elif self.shape == SHAPE_GENERIC: + assert self.sub_fields + t = '{}[{}]'.format( + display_as_type(self.type_), ', '.join(display_as_type(f.type_) for f in self.sub_fields) + ) + elif self.shape != SHAPE_SINGLETON: + t = SHAPE_NAME_LOOKUP[self.shape].format(t) + + if self.allow_none and (self.shape != SHAPE_SINGLETON or not self.sub_fields): + t = f'Optional[{t}]' + return PyObjectStr(t) + + def __repr_args__(self) -> 'ReprArgs': + args = [('name', self.name), ('type', self._type_display()), ('required', self.required)] + + if not self.required: + if self.default_factory is not None: + args.append(('default_factory', f'<function {self.default_factory.__name__}>')) + else: + args.append(('default', self.default)) + + if self.alt_alias: + args.append(('alias', self.alias)) + return args + + +class ModelPrivateAttr(Representation): + __slots__ = ('default', 'default_factory') + + def __init__(self, default: Any = Undefined, *, default_factory: Optional[NoArgAnyCallable] = None) -> None: + self.default = default + self.default_factory = default_factory + + def get_default(self) -> Any: + return smart_deepcopy(self.default) if self.default_factory is None else self.default_factory() + + def __eq__(self, other: Any) -> bool: + return isinstance(other, self.__class__) and (self.default, self.default_factory) == ( + other.default, + other.default_factory, + ) + + +def PrivateAttr( + default: Any = Undefined, + *, + default_factory: Optional[NoArgAnyCallable] = None, +) -> Any: + """ + Indicates that an attribute is only used internally and never mixed with regular fields. + + Types or values of private attrs are not checked by pydantic and it's up to you to keep them relevant. + + Private attrs are stored in model __slots__. + + :param default: the attribute’s default value + :param default_factory: callable that will be called when a default value is needed for this attribute + If both `default` and `default_factory` are set, an error is raised.
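+
+    e.g. (illustrative, names hypothetical):
+    ```
+    class TimeAwareModel(BaseModel):
+        _processed_at: datetime = PrivateAttr(default_factory=datetime.utcnow)
+    ```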
+ """ + if default is not Undefined and default_factory is not None: + raise ValueError('cannot specify both default and default_factory') + + return ModelPrivateAttr( + default, + default_factory=default_factory, + ) + + +class DeferredType: + """ + Used to postpone field preparation, while creating recursive generic models. + """ + + +def is_finalvar_with_default_val(type_: Type[Any], val: Any) -> bool: + return is_finalvar(type_) and val is not Undefined and not isinstance(val, FieldInfo) diff --git a/lib/pydantic/generics.py b/lib/pydantic/generics.py new file mode 100644 index 00000000..a3f52bfe --- /dev/null +++ b/lib/pydantic/generics.py @@ -0,0 +1,364 @@ +import sys +import typing +from typing import ( + TYPE_CHECKING, + Any, + ClassVar, + Dict, + Generic, + Iterator, + List, + Mapping, + Optional, + Tuple, + Type, + TypeVar, + Union, + cast, +) + +from typing_extensions import Annotated + +from .class_validators import gather_all_validators +from .fields import DeferredType +from .main import BaseModel, create_model +from .types import JsonWrapper +from .typing import display_as_type, get_all_type_hints, get_args, get_origin, typing_base +from .utils import LimitedDict, all_identical, lenient_issubclass + +GenericModelT = TypeVar('GenericModelT', bound='GenericModel') +TypeVarType = Any # since mypy doesn't allow the use of TypeVar as a type + +Parametrization = Mapping[TypeVarType, Type[Any]] + +_generic_types_cache: LimitedDict[Tuple[Type[Any], Union[Any, Tuple[Any, ...]]], Type[BaseModel]] = LimitedDict() +# _assigned_parameters is a Mapping from parametrized version of generic models to assigned types of parametrizations +# as captured during construction of the class (not instances). +# E.g., for generic model `Model[A, B]`, when parametrized model `Model[int, str]` is created, +# `Model[int, str]`: {A: int, B: str}` will be stored in `_assigned_parameters`. +# (This information is only otherwise available after creation from the class name string). +_assigned_parameters: LimitedDict[Type[Any], Parametrization] = LimitedDict() + + +class GenericModel(BaseModel): + __slots__ = () + __concrete__: ClassVar[bool] = False + + if TYPE_CHECKING: + # Putting this in a TYPE_CHECKING block allows us to replace `if Generic not in cls.__bases__` with + # `not hasattr(cls, "__parameters__")`. This means we don't need to force non-concrete subclasses of + # `GenericModel` to also inherit from `Generic`, which would require changes to the use of `create_model` below. + __parameters__: ClassVar[Tuple[TypeVarType, ...]] + + # Setting the return type as Type[Any] instead of Type[BaseModel] prevents PyCharm warnings + def __class_getitem__(cls: Type[GenericModelT], params: Union[Type[Any], Tuple[Type[Any], ...]]) -> Type[Any]: + """Instantiates a new class from a generic class `cls` and type variables `params`. + + :param params: Tuple of types the class . Given a generic class + `Model` with 2 type variables and a concrete model `Model[str, int]`, + the value `(str, int)` would be passed to `params`. + :return: New model class inheriting from `cls` with instantiated + types described by `params`. If no parameters are given, `cls` is + returned as is. 
+ + """ + + def _cache_key(_params: Any) -> Tuple[Type[GenericModelT], Any, Tuple[Any, ...]]: + return cls, _params, get_args(_params) + + cached = _generic_types_cache.get(_cache_key(params)) + if cached is not None: + return cached + if cls.__concrete__ and Generic not in cls.__bases__: + raise TypeError('Cannot parameterize a concrete instantiation of a generic model') + if not isinstance(params, tuple): + params = (params,) + if cls is GenericModel and any(isinstance(param, TypeVar) for param in params): + raise TypeError('Type parameters should be placed on typing.Generic, not GenericModel') + if not hasattr(cls, '__parameters__'): + raise TypeError(f'Type {cls.__name__} must inherit from typing.Generic before being parameterized') + + check_parameters_count(cls, params) + # Build map from generic typevars to passed params + typevars_map: Dict[TypeVarType, Type[Any]] = dict(zip(cls.__parameters__, params)) + if all_identical(typevars_map.keys(), typevars_map.values()) and typevars_map: + return cls # if arguments are equal to parameters it's the same object + + # Create new model with original model as parent inserting fields with DeferredType. + model_name = cls.__concrete_name__(params) + validators = gather_all_validators(cls) + + type_hints = get_all_type_hints(cls).items() + instance_type_hints = {k: v for k, v in type_hints if get_origin(v) is not ClassVar} + + fields = {k: (DeferredType(), cls.__fields__[k].field_info) for k in instance_type_hints if k in cls.__fields__} + + model_module, called_globally = get_caller_frame_info() + created_model = cast( + Type[GenericModel], # casting ensures mypy is aware of the __concrete__ and __parameters__ attributes + create_model( + model_name, + __module__=model_module or cls.__module__, + __base__=(cls,) + tuple(cls.__parameterized_bases__(typevars_map)), + __config__=None, + __validators__=validators, + __cls_kwargs__=None, + **fields, + ), + ) + + _assigned_parameters[created_model] = typevars_map + + if called_globally: # create global reference and therefore allow pickling + object_by_reference = None + reference_name = model_name + reference_module_globals = sys.modules[created_model.__module__].__dict__ + while object_by_reference is not created_model: + object_by_reference = reference_module_globals.setdefault(reference_name, created_model) + reference_name += '_' + + created_model.Config = cls.Config + + # Find any typevars that are still present in the model. + # If none are left, the model is fully "concrete", otherwise the new + # class is a generic class as well taking the found typevars as + # parameters. + new_params = tuple( + {param: None for param in iter_contained_typevars(typevars_map.values())} + ) # use dict as ordered set + created_model.__concrete__ = not new_params + if new_params: + created_model.__parameters__ = new_params + + # Save created model in cache so we don't end up creating duplicate + # models that should be identical. + _generic_types_cache[_cache_key(params)] = created_model + if len(params) == 1: + _generic_types_cache[_cache_key(params[0])] = created_model + + # Recursively walk class type hints and replace generic typevars + # with concrete types that were passed. + _prepare_model_fields(created_model, fields, instance_type_hints, typevars_map) + + return created_model + + @classmethod + def __concrete_name__(cls: Type[Any], params: Tuple[Type[Any], ...]) -> str: + """Compute class name for child classes. + + :param params: Tuple of types the class . 
+            `Model` with 2 type variables and a concrete model `Model[str, int]`,
+            the value `(str, int)` would be passed to `params`.
+        :return: String representing the new class where `params` are
+            passed to `cls` as type variables.
+
+        This method can be overridden to achieve a custom naming scheme for GenericModels.
+        """
+        param_names = [display_as_type(param) for param in params]
+        params_component = ', '.join(param_names)
+        return f'{cls.__name__}[{params_component}]'
+
+    @classmethod
+    def __parameterized_bases__(cls, typevars_map: Parametrization) -> Iterator[Type[Any]]:
+        """
+        Returns unbound bases of cls parameterised to the given type variables.
+
+        :param typevars_map: Dictionary of type applications for binding subclasses.
+            Given a generic class `Model` with 2 type variables [S, T]
+            and a concrete model `Model[str, int]`,
+            the value `{S: str, T: int}` would be passed to `typevars_map`.
+        :return: an iterator of generic subclasses, parameterised by `typevars_map`
+            and other assigned parameters of `cls`
+
+        e.g.:
+        ```
+        class A(GenericModel, Generic[T]):
+            ...
+
+        class B(A[V], Generic[V]):
+            ...
+
+        assert A[int] in B.__parameterized_bases__({V: int})
+        ```
+        """
+
+        def build_base_model(
+            base_model: Type[GenericModel], mapped_types: Parametrization
+        ) -> Iterator[Type[GenericModel]]:
+            base_parameters = tuple(mapped_types[param] for param in base_model.__parameters__)
+            parameterized_base = base_model.__class_getitem__(base_parameters)
+            if parameterized_base is base_model or parameterized_base is cls:
+                # Avoid duplication in MRO
+                return
+            yield parameterized_base
+
+        for base_model in cls.__bases__:
+            if not issubclass(base_model, GenericModel):
+                # not a class that can be meaningfully parameterized
+                continue
+            elif not getattr(base_model, '__parameters__', None):
+                # base_model is "GenericModel" (and has no __parameters__)
+                # or
+                # base_model is already concrete, and will be included transitively via cls.
+                continue
+            elif cls in _assigned_parameters:
+                if base_model in _assigned_parameters:
+                    # cls is partially parameterised but not from base_model
+                    # e.g. cls = B[S], base_model = A[S]
+                    # B[S][int] should subclass A[int], (and will be transitively via B[int])
+                    # but it's not viable to consistently subclass types with arbitrary construction
+                    # So don't attempt to include A[S][int]
+                    continue
+                else:  # base_model not in _assigned_parameters:
+                    # cls is partially parameterized, base_model is original generic
+                    # e.g. cls = B[str, T], base_model = B[S, T]
+                    # Need to determine the mapping for the base_model parameters
+                    mapped_types: Parametrization = {
+                        key: typevars_map.get(value, value) for key, value in _assigned_parameters[cls].items()
+                    }
+                    yield from build_base_model(base_model, mapped_types)
+            else:
+                # cls is base generic, so base_class has a distinct base
+                # can construct the Parameterised base model using typevars_map directly
+                yield from build_base_model(base_model, typevars_map)
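To illustrate `__class_getitem__` above: binding only some type variables yields another generic model, while binding all of them yields a concrete one (a sketch; `S`, `T` and `Pair` are hypothetical names):

from typing import Generic, TypeVar
from pydantic.generics import GenericModel

S = TypeVar('S')
T = TypeVar('T')

class Pair(GenericModel, Generic[S, T]):
    first: S
    second: T

Partial = Pair[int, T]  # still generic: __parameters__ == (T,)
assert not Partial.__concrete__
Full = Partial[str]     # all typevars bound -> concrete
assert Full.__concrete__
assert issubclass(Full, Pair[int, str])  # via __parameterized_bases__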
+
+
+def replace_types(type_: Any, type_map: Mapping[Any, Any]) -> Any:
+    """Return type with all occurrences of `type_map` keys recursively replaced with their values.
+
+    :param type_: Any type, class or generic alias
+    :param type_map: Mapping from `TypeVar` instances to concrete types.
+    :return: New type representing the basic structure of `type_` with all
+        `type_map` keys recursively replaced.
+
+    >>> replace_types(Tuple[str, Union[List[str], float]], {str: int})
+    Tuple[int, Union[List[int], float]]
+
+    """
+    if not type_map:
+        return type_
+
+    type_args = get_args(type_)
+    origin_type = get_origin(type_)
+
+    if origin_type is Annotated:
+        annotated_type, *annotations = type_args
+        return Annotated[replace_types(annotated_type, type_map), tuple(annotations)]
+
+    # Having type args is a good indicator that this is a typing module
+    # class instantiation or a generic alias of some sort.
+    if type_args:
+        resolved_type_args = tuple(replace_types(arg, type_map) for arg in type_args)
+        if all_identical(type_args, resolved_type_args):
+            # If all arguments are the same, there is no need to modify the
+            # type or create a new object at all
+            return type_
+        if (
+            origin_type is not None
+            and isinstance(type_, typing_base)
+            and not isinstance(origin_type, typing_base)
+            and getattr(type_, '_name', None) is not None
+        ):
+            # In Python < 3.9 generic aliases don't exist, so any of these like `list`,
+            # `type` or `collections.abc.Callable` need to be translated.
+            # See: https://www.python.org/dev/peps/pep-0585
+            origin_type = getattr(typing, type_._name)
+        assert origin_type is not None
+        return origin_type[resolved_type_args]
+
+    # We handle pydantic generic models separately as they don't have the same
+    # semantics as "typing" classes or generic aliases
+    if not origin_type and lenient_issubclass(type_, GenericModel) and not type_.__concrete__:
+        type_args = type_.__parameters__
+        resolved_type_args = tuple(replace_types(t, type_map) for t in type_args)
+        if all_identical(type_args, resolved_type_args):
+            return type_
+        return type_[resolved_type_args]
+
+    # Handle special case for typehints that can have lists as arguments.
+    # `typing.Callable[[int, str], int]` is an example for this.
+    if isinstance(type_, (List, list)):
+        resolved_list = [replace_types(element, type_map) for element in type_]
+        if all_identical(type_, resolved_list):
+            return type_
+        return resolved_list
+
+    # For JsonWrapper, we need to handle the inner type to allow correct parsing
+    # of generic Json arguments like Json[T]
+    if not origin_type and lenient_issubclass(type_, JsonWrapper):
+        type_.inner_type = replace_types(type_.inner_type, type_map)
+        return type_
+
+    # If all else fails, we try to resolve the type directly and otherwise just
+    # return the input with no modifications.
+    return type_map.get(type_, type_)
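Two properties of `replace_types` worth noting beyond the doctest: substitution recurses through nested aliases, and when nothing matches the original object is returned untouched, so callers can detect no-ops by identity (a sketch; `T` is a hypothetical type variable):

from typing import Dict, List, Optional, TypeVar
from pydantic.generics import replace_types

T = TypeVar('T')

assert replace_types(Dict[str, Optional[T]], {T: int}) == Dict[str, Optional[int]]

tp = List[int]
assert replace_types(tp, {T: str}) is tp  # no occurrences -> same object back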
+
+
+def check_parameters_count(cls: Type[GenericModel], parameters: Tuple[Any, ...]) -> None:
+    actual = len(parameters)
+    expected = len(cls.__parameters__)
+    if actual != expected:
+        description = 'many' if actual > expected else 'few'
+        raise TypeError(f'Too {description} parameters for {cls.__name__}; actual {actual}, expected {expected}')
+
+
+DictValues: Type[Any] = {}.values().__class__
+
+
+def iter_contained_typevars(v: Any) -> Iterator[TypeVarType]:
+    """Recursively iterate through all subtypes and type args of `v` and yield any typevars that are found."""
+    if isinstance(v, TypeVar):
+        yield v
+    elif hasattr(v, '__parameters__') and not get_origin(v) and lenient_issubclass(v, GenericModel):
+        yield from v.__parameters__
+    elif isinstance(v, (DictValues, list)):
+        for var in v:
+            yield from iter_contained_typevars(var)
+    else:
+        args = get_args(v)
+        for arg in args:
+            yield from iter_contained_typevars(arg)
+
+
+def get_caller_frame_info() -> Tuple[Optional[str], bool]:
+    """
+    Used inside a function to check whether it was called globally.
+
+    Will only work against non-compiled code, therefore used only in pydantic.generics.
+
+    :return: Tuple[module_name, called_globally]
+    """
+    try:
+        previous_caller_frame = sys._getframe(2)
+    except ValueError as e:
+        raise RuntimeError('This function must be used inside another function') from e
+    except AttributeError:  # sys module does not have _getframe function, so there's nothing we can do about it
+        return None, False
+    frame_globals = previous_caller_frame.f_globals
+    return frame_globals.get('__name__'), previous_caller_frame.f_locals is frame_globals
+
+
+def _prepare_model_fields(
+    created_model: Type[GenericModel],
+    fields: Mapping[str, Any],
+    instance_type_hints: Mapping[str, type],
+    typevars_map: Mapping[Any, type],
+) -> None:
+    """
+    Replace DeferredType fields with concrete type hints and prepare them.
+    """
+
+    for key, field in created_model.__fields__.items():
+        if key not in fields:
+            assert field.type_.__class__ is not DeferredType
+            # https://github.com/nedbat/coveragepy/issues/198
+            continue  # pragma: no cover
+
+        assert field.type_.__class__ is DeferredType, field.type_.__class__
+
+        field_type_hint = instance_type_hints[key]
+        concrete_type = replace_types(field_type_hint, typevars_map)
+        field.type_ = concrete_type
+        field.outer_type_ = concrete_type
+        field.prepare()
+        created_model.__annotations__[key] = concrete_type
diff --git a/lib/pydantic/json.py b/lib/pydantic/json.py
new file mode 100644
index 00000000..b358b850
--- /dev/null
+++ b/lib/pydantic/json.py
@@ -0,0 +1,112 @@
+import datetime
+from collections import deque
+from decimal import Decimal
+from enum import Enum
+from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network
+from pathlib import Path
+from re import Pattern
+from types import GeneratorType
+from typing import Any, Callable, Dict, Type, Union
+from uuid import UUID
+
+from .color import Color
+from .networks import NameEmail
+from .types import SecretBytes, SecretStr
+
+__all__ = 'pydantic_encoder', 'custom_pydantic_encoder', 'timedelta_isoformat'
+
+
+def isoformat(o: Union[datetime.date, datetime.time]) -> str:
+    return o.isoformat()
+
+
+def decimal_encoder(dec_value: Decimal) -> Union[int, float]:
+    """
+    Encodes a Decimal as an int if there's no exponent, otherwise as a float.
+
+    This is useful when we use ConstrainedDecimal to represent Numeric(x,0)
+    where an integer (but not int typed) is used.
Encoding this as a float + results in failed round-tripping between encode and parse. + Our Id type is a prime example of this. + + >>> decimal_encoder(Decimal("1.0")) + 1.0 + + >>> decimal_encoder(Decimal("1")) + 1 + """ + if dec_value.as_tuple().exponent >= 0: + return int(dec_value) + else: + return float(dec_value) + + +ENCODERS_BY_TYPE: Dict[Type[Any], Callable[[Any], Any]] = { + bytes: lambda o: o.decode(), + Color: str, + datetime.date: isoformat, + datetime.datetime: isoformat, + datetime.time: isoformat, + datetime.timedelta: lambda td: td.total_seconds(), + Decimal: decimal_encoder, + Enum: lambda o: o.value, + frozenset: list, + deque: list, + GeneratorType: list, + IPv4Address: str, + IPv4Interface: str, + IPv4Network: str, + IPv6Address: str, + IPv6Interface: str, + IPv6Network: str, + NameEmail: str, + Path: str, + Pattern: lambda o: o.pattern, + SecretBytes: str, + SecretStr: str, + set: list, + UUID: str, +} + + +def pydantic_encoder(obj: Any) -> Any: + from dataclasses import asdict, is_dataclass + + from .main import BaseModel + + if isinstance(obj, BaseModel): + return obj.dict() + elif is_dataclass(obj): + return asdict(obj) + + # Check the class type and its superclasses for a matching encoder + for base in obj.__class__.__mro__[:-1]: + try: + encoder = ENCODERS_BY_TYPE[base] + except KeyError: + continue + return encoder(obj) + else: # We have exited the for loop without finding a suitable encoder + raise TypeError(f"Object of type '{obj.__class__.__name__}' is not JSON serializable") + + +def custom_pydantic_encoder(type_encoders: Dict[Any, Callable[[Type[Any]], Any]], obj: Any) -> Any: + # Check the class type and its superclasses for a matching encoder + for base in obj.__class__.__mro__[:-1]: + try: + encoder = type_encoders[base] + except KeyError: + continue + + return encoder(obj) + else: # We have exited the for loop without finding a suitable encoder + return pydantic_encoder(obj) + + +def timedelta_isoformat(td: datetime.timedelta) -> str: + """ + ISO 8601 encoding for Python timedelta object. 
+ """ + minutes, seconds = divmod(td.seconds, 60) + hours, minutes = divmod(minutes, 60) + return f'{"-" if td.days < 0 else ""}P{abs(td.days)}DT{hours:d}H{minutes:d}M{seconds:d}.{td.microseconds:06d}S' diff --git a/lib/pydantic/main.py b/lib/pydantic/main.py new file mode 100644 index 00000000..69f3b751 --- /dev/null +++ b/lib/pydantic/main.py @@ -0,0 +1,1109 @@ +import warnings +from abc import ABCMeta +from copy import deepcopy +from enum import Enum +from functools import partial +from pathlib import Path +from types import FunctionType, prepare_class, resolve_bases +from typing import ( + TYPE_CHECKING, + AbstractSet, + Any, + Callable, + ClassVar, + Dict, + List, + Mapping, + Optional, + Tuple, + Type, + TypeVar, + Union, + cast, + no_type_check, + overload, +) + +from typing_extensions import dataclass_transform + +from .class_validators import ValidatorGroup, extract_root_validators, extract_validators, inherit_validators +from .config import BaseConfig, Extra, inherit_config, prepare_config +from .error_wrappers import ErrorWrapper, ValidationError +from .errors import ConfigError, DictError, ExtraError, MissingError +from .fields import ( + MAPPING_LIKE_SHAPES, + Field, + FieldInfo, + ModelField, + ModelPrivateAttr, + PrivateAttr, + Undefined, + is_finalvar_with_default_val, +) +from .json import custom_pydantic_encoder, pydantic_encoder +from .parse import Protocol, load_file, load_str_bytes +from .schema import default_ref_template, model_schema +from .types import PyObject, StrBytes +from .typing import ( + AnyCallable, + get_args, + get_origin, + is_classvar, + is_namedtuple, + is_union, + resolve_annotations, + update_model_forward_refs, +) +from .utils import ( + DUNDER_ATTRIBUTES, + ROOT_KEY, + ClassAttribute, + GetterDict, + Representation, + ValueItems, + generate_model_signature, + is_valid_field, + is_valid_private_name, + lenient_issubclass, + sequence_like, + smart_deepcopy, + unique_list, + validate_field_name, +) + +if TYPE_CHECKING: + from inspect import Signature + + from .class_validators import ValidatorListDict + from .types import ModelOrDc + from .typing import ( + AbstractSetIntStr, + AnyClassMethod, + CallableGenerator, + DictAny, + DictStrAny, + MappingIntStrAny, + ReprArgs, + SetStr, + TupleGenerator, + ) + + Model = TypeVar('Model', bound='BaseModel') + +__all__ = 'BaseModel', 'create_model', 'validate_model' + +_T = TypeVar('_T') + + +def validate_custom_root_type(fields: Dict[str, ModelField]) -> None: + if len(fields) > 1: + raise ValueError(f'{ROOT_KEY} cannot be mixed with other fields') + + +def generate_hash_function(frozen: bool) -> Optional[Callable[[Any], int]]: + def hash_function(self_: Any) -> int: + return hash(self_.__class__) + hash(tuple(self_.__dict__.values())) + + return hash_function if frozen else None + + +# If a field is of type `Callable`, its default value should be a function and cannot to ignored. +ANNOTATED_FIELD_UNTOUCHED_TYPES: Tuple[Any, ...] = (property, type, classmethod, staticmethod) +# When creating a `BaseModel` instance, we bypass all the methods, properties... added to the model +UNTOUCHED_TYPES: Tuple[Any, ...] = (FunctionType,) + ANNOTATED_FIELD_UNTOUCHED_TYPES +# Note `ModelMetaclass` refers to `BaseModel`, but is also used to *create* `BaseModel`, so we need to add this extra +# (somewhat hacky) boolean to keep track of whether we've created the `BaseModel` class yet, and therefore whether it's +# safe to refer to it. 
If it *hasn't* been created, we assume that the `__new__` call we're in the middle of is for +# the `BaseModel` class, since that's defined immediately after the metaclass. +_is_base_model_class_defined = False + + +@dataclass_transform(kw_only_default=True, field_descriptors=(Field, FieldInfo)) +class ModelMetaclass(ABCMeta): + @no_type_check # noqa C901 + def __new__(mcs, name, bases, namespace, **kwargs): # noqa C901 + fields: Dict[str, ModelField] = {} + config = BaseConfig + validators: 'ValidatorListDict' = {} + + pre_root_validators, post_root_validators = [], [] + private_attributes: Dict[str, ModelPrivateAttr] = {} + base_private_attributes: Dict[str, ModelPrivateAttr] = {} + slots: SetStr = namespace.get('__slots__', ()) + slots = {slots} if isinstance(slots, str) else set(slots) + class_vars: SetStr = set() + hash_func: Optional[Callable[[Any], int]] = None + + for base in reversed(bases): + if _is_base_model_class_defined and issubclass(base, BaseModel) and base != BaseModel: + fields.update(smart_deepcopy(base.__fields__)) + config = inherit_config(base.__config__, config) + validators = inherit_validators(base.__validators__, validators) + pre_root_validators += base.__pre_root_validators__ + post_root_validators += base.__post_root_validators__ + base_private_attributes.update(base.__private_attributes__) + class_vars.update(base.__class_vars__) + hash_func = base.__hash__ + + resolve_forward_refs = kwargs.pop('__resolve_forward_refs__', True) + allowed_config_kwargs: SetStr = { + key + for key in dir(config) + if not (key.startswith('__') and key.endswith('__')) # skip dunder methods and attributes + } + config_kwargs = {key: kwargs.pop(key) for key in kwargs.keys() & allowed_config_kwargs} + config_from_namespace = namespace.get('Config') + if config_kwargs and config_from_namespace: + raise TypeError('Specifying config in two places is ambiguous, use either Config attribute or class kwargs') + config = inherit_config(config_from_namespace, config, **config_kwargs) + + validators = inherit_validators(extract_validators(namespace), validators) + vg = ValidatorGroup(validators) + + for f in fields.values(): + f.set_config(config) + extra_validators = vg.get_validators(f.name) + if extra_validators: + f.class_validators.update(extra_validators) + # re-run prepare to add extra validators + f.populate_validators() + + prepare_config(config, name) + + untouched_types = ANNOTATED_FIELD_UNTOUCHED_TYPES + + def is_untouched(v: Any) -> bool: + return isinstance(v, untouched_types) or v.__class__.__name__ == 'cython_function_or_method' + + if (namespace.get('__module__'), namespace.get('__qualname__')) != ('pydantic.main', 'BaseModel'): + annotations = resolve_annotations(namespace.get('__annotations__', {}), namespace.get('__module__', None)) + # annotation only fields need to come first in fields + for ann_name, ann_type in annotations.items(): + if is_classvar(ann_type): + class_vars.add(ann_name) + elif is_finalvar_with_default_val(ann_type, namespace.get(ann_name, Undefined)): + class_vars.add(ann_name) + elif is_valid_field(ann_name): + validate_field_name(bases, ann_name) + value = namespace.get(ann_name, Undefined) + allowed_types = get_args(ann_type) if is_union(get_origin(ann_type)) else (ann_type,) + if ( + is_untouched(value) + and ann_type != PyObject + and not any( + lenient_issubclass(get_origin(allowed_type), Type) for allowed_type in allowed_types + ) + ): + continue + fields[ann_name] = ModelField.infer( + name=ann_name, + value=value, + annotation=ann_type, + 
class_validators=vg.get_validators(ann_name), + config=config, + ) + elif ann_name not in namespace and config.underscore_attrs_are_private: + private_attributes[ann_name] = PrivateAttr() + + untouched_types = UNTOUCHED_TYPES + config.keep_untouched + for var_name, value in namespace.items(): + can_be_changed = var_name not in class_vars and not is_untouched(value) + if isinstance(value, ModelPrivateAttr): + if not is_valid_private_name(var_name): + raise NameError( + f'Private attributes "{var_name}" must not be a valid field name; ' + f'Use sunder or dunder names, e. g. "_{var_name}" or "__{var_name}__"' + ) + private_attributes[var_name] = value + elif config.underscore_attrs_are_private and is_valid_private_name(var_name) and can_be_changed: + private_attributes[var_name] = PrivateAttr(default=value) + elif is_valid_field(var_name) and var_name not in annotations and can_be_changed: + validate_field_name(bases, var_name) + inferred = ModelField.infer( + name=var_name, + value=value, + annotation=annotations.get(var_name, Undefined), + class_validators=vg.get_validators(var_name), + config=config, + ) + if var_name in fields: + if lenient_issubclass(inferred.type_, fields[var_name].type_): + inferred.type_ = fields[var_name].type_ + else: + raise TypeError( + f'The type of {name}.{var_name} differs from the new default value; ' + f'if you wish to change the type of this field, please use a type annotation' + ) + fields[var_name] = inferred + + _custom_root_type = ROOT_KEY in fields + if _custom_root_type: + validate_custom_root_type(fields) + vg.check_for_unused() + if config.json_encoders: + json_encoder = partial(custom_pydantic_encoder, config.json_encoders) + else: + json_encoder = pydantic_encoder + pre_rv_new, post_rv_new = extract_root_validators(namespace) + + if hash_func is None: + hash_func = generate_hash_function(config.frozen) + + exclude_from_namespace = fields | private_attributes.keys() | {'__slots__'} + new_namespace = { + '__config__': config, + '__fields__': fields, + '__exclude_fields__': { + name: field.field_info.exclude for name, field in fields.items() if field.field_info.exclude is not None + } + or None, + '__include_fields__': { + name: field.field_info.include for name, field in fields.items() if field.field_info.include is not None + } + or None, + '__validators__': vg.validators, + '__pre_root_validators__': unique_list( + pre_root_validators + pre_rv_new, + name_factory=lambda v: v.__name__, + ), + '__post_root_validators__': unique_list( + post_root_validators + post_rv_new, + name_factory=lambda skip_on_failure_and_v: skip_on_failure_and_v[1].__name__, + ), + '__schema_cache__': {}, + '__json_encoder__': staticmethod(json_encoder), + '__custom_root_type__': _custom_root_type, + '__private_attributes__': {**base_private_attributes, **private_attributes}, + '__slots__': slots | private_attributes.keys(), + '__hash__': hash_func, + '__class_vars__': class_vars, + **{n: v for n, v in namespace.items() if n not in exclude_from_namespace}, + } + + cls = super().__new__(mcs, name, bases, new_namespace, **kwargs) + # set __signature__ attr only for model class, but not for its instances + cls.__signature__ = ClassAttribute('__signature__', generate_model_signature(cls.__init__, fields, config)) + if resolve_forward_refs: + cls.__try_update_forward_refs__() + + # preserve `__set_name__` protocol defined in https://peps.python.org/pep-0487 + # for attributes not in `new_namespace` (e.g. 
private attributes) + for name, obj in namespace.items(): + if name not in new_namespace: + set_name = getattr(obj, '__set_name__', None) + if callable(set_name): + set_name(cls, name) + + return cls + + def __instancecheck__(self, instance: Any) -> bool: + """ + Avoid calling ABC _abc_subclasscheck unless we're pretty sure. + + See #3829 and python/cpython#92810 + """ + return hasattr(instance, '__fields__') and super().__instancecheck__(instance) + + +object_setattr = object.__setattr__ + + +class BaseModel(Representation, metaclass=ModelMetaclass): + if TYPE_CHECKING: + # populated by the metaclass, defined here to help IDEs only + __fields__: ClassVar[Dict[str, ModelField]] = {} + __include_fields__: ClassVar[Optional[Mapping[str, Any]]] = None + __exclude_fields__: ClassVar[Optional[Mapping[str, Any]]] = None + __validators__: ClassVar[Dict[str, AnyCallable]] = {} + __pre_root_validators__: ClassVar[List[AnyCallable]] + __post_root_validators__: ClassVar[List[Tuple[bool, AnyCallable]]] + __config__: ClassVar[Type[BaseConfig]] = BaseConfig + __json_encoder__: ClassVar[Callable[[Any], Any]] = lambda x: x + __schema_cache__: ClassVar['DictAny'] = {} + __custom_root_type__: ClassVar[bool] = False + __signature__: ClassVar['Signature'] + __private_attributes__: ClassVar[Dict[str, ModelPrivateAttr]] + __class_vars__: ClassVar[SetStr] + __fields_set__: ClassVar[SetStr] = set() + + Config = BaseConfig + __slots__ = ('__dict__', '__fields_set__') + __doc__ = '' # Null out the Representation docstring + + def __init__(__pydantic_self__, **data: Any) -> None: + """ + Create a new model by parsing and validating input data from keyword arguments. + + Raises ValidationError if the input data cannot be parsed to form a valid model. + """ + # Uses something other than `self` the first arg to allow "self" as a settable attribute + values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data) + if validation_error: + raise validation_error + try: + object_setattr(__pydantic_self__, '__dict__', values) + except TypeError as e: + raise TypeError( + 'Model values must be a dict; you may not have returned a dictionary from a root validator' + ) from e + object_setattr(__pydantic_self__, '__fields_set__', fields_set) + __pydantic_self__._init_private_attributes() + + @no_type_check + def __setattr__(self, name, value): # noqa: C901 (ignore complexity) + if name in self.__private_attributes__ or name in DUNDER_ATTRIBUTES: + return object_setattr(self, name, value) + + if self.__config__.extra is not Extra.allow and name not in self.__fields__: + raise ValueError(f'"{self.__class__.__name__}" object has no field "{name}"') + elif not self.__config__.allow_mutation or self.__config__.frozen: + raise TypeError(f'"{self.__class__.__name__}" is immutable and does not support item assignment') + elif name in self.__fields__ and self.__fields__[name].final: + raise TypeError( + f'"{self.__class__.__name__}" object "{name}" field is final and does not support reassignment' + ) + elif self.__config__.validate_assignment: + new_values = {**self.__dict__, name: value} + + for validator in self.__pre_root_validators__: + try: + new_values = validator(self.__class__, new_values) + except (ValueError, TypeError, AssertionError) as exc: + raise ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], self.__class__) + + known_field = self.__fields__.get(name, None) + if known_field: + # We want to + # - make sure validators are called without the current value for this field inside `values` + # - keep 
other values (e.g. submodels) untouched (using `BaseModel.dict()` will change them into dicts) + # - keep the order of the fields + if not known_field.field_info.allow_mutation: + raise TypeError(f'"{known_field.name}" has allow_mutation set to False and cannot be assigned') + dict_without_original_value = {k: v for k, v in self.__dict__.items() if k != name} + value, error_ = known_field.validate(value, dict_without_original_value, loc=name, cls=self.__class__) + if error_: + raise ValidationError([error_], self.__class__) + else: + new_values[name] = value + + errors = [] + for skip_on_failure, validator in self.__post_root_validators__: + if skip_on_failure and errors: + continue + try: + new_values = validator(self.__class__, new_values) + except (ValueError, TypeError, AssertionError) as exc: + errors.append(ErrorWrapper(exc, loc=ROOT_KEY)) + if errors: + raise ValidationError(errors, self.__class__) + + # update the whole __dict__ as other values than just `value` + # may be changed (e.g. with `root_validator`) + object_setattr(self, '__dict__', new_values) + else: + self.__dict__[name] = value + + self.__fields_set__.add(name) + + def __getstate__(self) -> 'DictAny': + private_attrs = ((k, getattr(self, k, Undefined)) for k in self.__private_attributes__) + return { + '__dict__': self.__dict__, + '__fields_set__': self.__fields_set__, + '__private_attribute_values__': {k: v for k, v in private_attrs if v is not Undefined}, + } + + def __setstate__(self, state: 'DictAny') -> None: + object_setattr(self, '__dict__', state['__dict__']) + object_setattr(self, '__fields_set__', state['__fields_set__']) + for name, value in state.get('__private_attribute_values__', {}).items(): + object_setattr(self, name, value) + + def _init_private_attributes(self) -> None: + for name, private_attr in self.__private_attributes__.items(): + default = private_attr.get_default() + if default is not Undefined: + object_setattr(self, name, default) + + def dict( + self, + *, + include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None, + exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None, + by_alias: bool = False, + skip_defaults: Optional[bool] = None, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: bool = False, + ) -> 'DictStrAny': + """ + Generate a dictionary representation of the model, optionally specifying which fields to include or exclude. + + """ + if skip_defaults is not None: + warnings.warn( + f'{self.__class__.__name__}.dict(): "skip_defaults" is deprecated and replaced by "exclude_unset"', + DeprecationWarning, + ) + exclude_unset = skip_defaults + + return dict( + self._iter( + to_dict=True, + by_alias=by_alias, + include=include, + exclude=exclude, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + ) + + def json( + self, + *, + include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None, + exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None, + by_alias: bool = False, + skip_defaults: Optional[bool] = None, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: bool = False, + encoder: Optional[Callable[[Any], Any]] = None, + models_as_dict: bool = True, + **dumps_kwargs: Any, + ) -> str: + """ + Generate a JSON representation of the model, `include` and `exclude` arguments as per `dict()`. + + `encoder` is an optional function to supply as `default` to json.dumps(), other arguments as per `json.dumps()`. 
+ """ + if skip_defaults is not None: + warnings.warn( + f'{self.__class__.__name__}.json(): "skip_defaults" is deprecated and replaced by "exclude_unset"', + DeprecationWarning, + ) + exclude_unset = skip_defaults + encoder = cast(Callable[[Any], Any], encoder or self.__json_encoder__) + + # We don't directly call `self.dict()`, which does exactly this with `to_dict=True` + # because we want to be able to keep raw `BaseModel` instances and not as `dict`. + # This allows users to write custom JSON encoders for given `BaseModel` classes. + data = dict( + self._iter( + to_dict=models_as_dict, + by_alias=by_alias, + include=include, + exclude=exclude, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + ) + if self.__custom_root_type__: + data = data[ROOT_KEY] + return self.__config__.json_dumps(data, default=encoder, **dumps_kwargs) + + @classmethod + def _enforce_dict_if_root(cls, obj: Any) -> Any: + if cls.__custom_root_type__ and ( + not (isinstance(obj, dict) and obj.keys() == {ROOT_KEY}) + or cls.__fields__[ROOT_KEY].shape in MAPPING_LIKE_SHAPES + ): + return {ROOT_KEY: obj} + else: + return obj + + @classmethod + def parse_obj(cls: Type['Model'], obj: Any) -> 'Model': + obj = cls._enforce_dict_if_root(obj) + if not isinstance(obj, dict): + try: + obj = dict(obj) + except (TypeError, ValueError) as e: + exc = TypeError(f'{cls.__name__} expected dict not {obj.__class__.__name__}') + raise ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls) from e + return cls(**obj) + + @classmethod + def parse_raw( + cls: Type['Model'], + b: StrBytes, + *, + content_type: str = None, + encoding: str = 'utf8', + proto: Protocol = None, + allow_pickle: bool = False, + ) -> 'Model': + try: + obj = load_str_bytes( + b, + proto=proto, + content_type=content_type, + encoding=encoding, + allow_pickle=allow_pickle, + json_loads=cls.__config__.json_loads, + ) + except (ValueError, TypeError, UnicodeDecodeError) as e: + raise ValidationError([ErrorWrapper(e, loc=ROOT_KEY)], cls) + return cls.parse_obj(obj) + + @classmethod + def parse_file( + cls: Type['Model'], + path: Union[str, Path], + *, + content_type: str = None, + encoding: str = 'utf8', + proto: Protocol = None, + allow_pickle: bool = False, + ) -> 'Model': + obj = load_file( + path, + proto=proto, + content_type=content_type, + encoding=encoding, + allow_pickle=allow_pickle, + json_loads=cls.__config__.json_loads, + ) + return cls.parse_obj(obj) + + @classmethod + def from_orm(cls: Type['Model'], obj: Any) -> 'Model': + if not cls.__config__.orm_mode: + raise ConfigError('You must have the config attribute orm_mode=True to use from_orm') + obj = {ROOT_KEY: obj} if cls.__custom_root_type__ else cls._decompose_class(obj) + m = cls.__new__(cls) + values, fields_set, validation_error = validate_model(cls, obj) + if validation_error: + raise validation_error + object_setattr(m, '__dict__', values) + object_setattr(m, '__fields_set__', fields_set) + m._init_private_attributes() + return m + + @classmethod + def construct(cls: Type['Model'], _fields_set: Optional['SetStr'] = None, **values: Any) -> 'Model': + """ + Creates a new model setting __dict__ and __fields_set__ from trusted or pre-validated data. + Default values are respected, but no other validation is performed. 
+ Behaves as if `Config.extra = 'allow'` was set since it adds all passed values + """ + m = cls.__new__(cls) + fields_values: Dict[str, Any] = {} + for name, field in cls.__fields__.items(): + if field.alt_alias and field.alias in values: + fields_values[name] = values[field.alias] + elif name in values: + fields_values[name] = values[name] + elif not field.required: + fields_values[name] = field.get_default() + fields_values.update(values) + object_setattr(m, '__dict__', fields_values) + if _fields_set is None: + _fields_set = set(values.keys()) + object_setattr(m, '__fields_set__', _fields_set) + m._init_private_attributes() + return m + + def _copy_and_set_values(self: 'Model', values: 'DictStrAny', fields_set: 'SetStr', *, deep: bool) -> 'Model': + if deep: + # chances of having empty dict here are quite low for using smart_deepcopy + values = deepcopy(values) + + cls = self.__class__ + m = cls.__new__(cls) + object_setattr(m, '__dict__', values) + object_setattr(m, '__fields_set__', fields_set) + for name in self.__private_attributes__: + value = getattr(self, name, Undefined) + if value is not Undefined: + if deep: + value = deepcopy(value) + object_setattr(m, name, value) + + return m + + def copy( + self: 'Model', + *, + include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None, + exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None, + update: Optional['DictStrAny'] = None, + deep: bool = False, + ) -> 'Model': + """ + Duplicate a model, optionally choose which fields to include, exclude and change. + + :param include: fields to include in new model + :param exclude: fields to exclude from new model, as with values this takes precedence over include + :param update: values to change/add in the new model. Note: the data is not validated before creating + the new model: you should trust this data + :param deep: set to `True` to make a deep copy of the model + :return: new model instance + """ + + values = dict( + self._iter(to_dict=False, by_alias=False, include=include, exclude=exclude, exclude_unset=False), + **(update or {}), + ) + + # new `__fields_set__` can have unset optional fields with a set value in `update` kwarg + if update: + fields_set = self.__fields_set__ | update.keys() + else: + fields_set = set(self.__fields_set__) + + return self._copy_and_set_values(values, fields_set, deep=deep) + + @classmethod + def schema(cls, by_alias: bool = True, ref_template: str = default_ref_template) -> 'DictStrAny': + cached = cls.__schema_cache__.get((by_alias, ref_template)) + if cached is not None: + return cached + s = model_schema(cls, by_alias=by_alias, ref_template=ref_template) + cls.__schema_cache__[(by_alias, ref_template)] = s + return s + + @classmethod + def schema_json( + cls, *, by_alias: bool = True, ref_template: str = default_ref_template, **dumps_kwargs: Any + ) -> str: + from .json import pydantic_encoder + + return cls.__config__.json_dumps( + cls.schema(by_alias=by_alias, ref_template=ref_template), default=pydantic_encoder, **dumps_kwargs + ) + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield cls.validate + + @classmethod + def validate(cls: Type['Model'], value: Any) -> 'Model': + if isinstance(value, cls): + copy_on_model_validation = cls.__config__.copy_on_model_validation + # whether to deep or shallow copy the model on validation, None means do not copy + deep_copy: Optional[bool] = None + if copy_on_model_validation not in {'deep', 'shallow', 'none'}: + # Warn about deprecated behavior + 
warnings.warn( + "`copy_on_model_validation` should be a string: 'deep', 'shallow' or 'none'", DeprecationWarning + ) + if copy_on_model_validation: + deep_copy = False + + if copy_on_model_validation == 'shallow': + # shallow copy + deep_copy = False + elif copy_on_model_validation == 'deep': + # deep copy + deep_copy = True + + if deep_copy is None: + return value + else: + return value._copy_and_set_values(value.__dict__, value.__fields_set__, deep=deep_copy) + + value = cls._enforce_dict_if_root(value) + + if isinstance(value, dict): + return cls(**value) + elif cls.__config__.orm_mode: + return cls.from_orm(value) + else: + try: + value_as_dict = dict(value) + except (TypeError, ValueError) as e: + raise DictError() from e + return cls(**value_as_dict) + + @classmethod + def _decompose_class(cls: Type['Model'], obj: Any) -> GetterDict: + if isinstance(obj, GetterDict): + return obj + return cls.__config__.getter_dict(obj) + + @classmethod + @no_type_check + def _get_value( + cls, + v: Any, + to_dict: bool, + by_alias: bool, + include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']], + exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']], + exclude_unset: bool, + exclude_defaults: bool, + exclude_none: bool, + ) -> Any: + + if isinstance(v, BaseModel): + if to_dict: + v_dict = v.dict( + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + include=include, + exclude=exclude, + exclude_none=exclude_none, + ) + if ROOT_KEY in v_dict: + return v_dict[ROOT_KEY] + return v_dict + else: + return v.copy(include=include, exclude=exclude) + + value_exclude = ValueItems(v, exclude) if exclude else None + value_include = ValueItems(v, include) if include else None + + if isinstance(v, dict): + return { + k_: cls._get_value( + v_, + to_dict=to_dict, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + include=value_include and value_include.for_element(k_), + exclude=value_exclude and value_exclude.for_element(k_), + exclude_none=exclude_none, + ) + for k_, v_ in v.items() + if (not value_exclude or not value_exclude.is_excluded(k_)) + and (not value_include or value_include.is_included(k_)) + } + + elif sequence_like(v): + seq_args = ( + cls._get_value( + v_, + to_dict=to_dict, + by_alias=by_alias, + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + include=value_include and value_include.for_element(i), + exclude=value_exclude and value_exclude.for_element(i), + exclude_none=exclude_none, + ) + for i, v_ in enumerate(v) + if (not value_exclude or not value_exclude.is_excluded(i)) + and (not value_include or value_include.is_included(i)) + ) + + return v.__class__(*seq_args) if is_namedtuple(v.__class__) else v.__class__(seq_args) + + elif isinstance(v, Enum) and getattr(cls.Config, 'use_enum_values', False): + return v.value + + else: + return v + + @classmethod + def __try_update_forward_refs__(cls, **localns: Any) -> None: + """ + Same as update_forward_refs but will not raise exception + when forward references are not defined. + """ + update_model_forward_refs(cls, cls.__fields__.values(), cls.__config__.json_encoders, localns, (NameError,)) + + @classmethod + def update_forward_refs(cls, **localns: Any) -> None: + """ + Try to update ForwardRefs on fields based on this Model, globalns and localns. 
+ """ + update_model_forward_refs(cls, cls.__fields__.values(), cls.__config__.json_encoders, localns) + + def __iter__(self) -> 'TupleGenerator': + """ + so `dict(model)` works + """ + yield from self.__dict__.items() + + def _iter( + self, + to_dict: bool = False, + by_alias: bool = False, + include: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None, + exclude: Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']] = None, + exclude_unset: bool = False, + exclude_defaults: bool = False, + exclude_none: bool = False, + ) -> 'TupleGenerator': + + # Merge field set excludes with explicit exclude parameter with explicit overriding field set options. + # The extra "is not None" guards are not logically necessary but optimizes performance for the simple case. + if exclude is not None or self.__exclude_fields__ is not None: + exclude = ValueItems.merge(self.__exclude_fields__, exclude) + + if include is not None or self.__include_fields__ is not None: + include = ValueItems.merge(self.__include_fields__, include, intersect=True) + + allowed_keys = self._calculate_keys( + include=include, exclude=exclude, exclude_unset=exclude_unset # type: ignore + ) + if allowed_keys is None and not (to_dict or by_alias or exclude_unset or exclude_defaults or exclude_none): + # huge boost for plain _iter() + yield from self.__dict__.items() + return + + value_exclude = ValueItems(self, exclude) if exclude is not None else None + value_include = ValueItems(self, include) if include is not None else None + + for field_key, v in self.__dict__.items(): + if (allowed_keys is not None and field_key not in allowed_keys) or (exclude_none and v is None): + continue + + if exclude_defaults: + model_field = self.__fields__.get(field_key) + if not getattr(model_field, 'required', True) and getattr(model_field, 'default', _missing) == v: + continue + + if by_alias and field_key in self.__fields__: + dict_key = self.__fields__[field_key].alias + else: + dict_key = field_key + + if to_dict or value_include or value_exclude: + v = self._get_value( + v, + to_dict=to_dict, + by_alias=by_alias, + include=value_include and value_include.for_element(field_key), + exclude=value_exclude and value_exclude.for_element(field_key), + exclude_unset=exclude_unset, + exclude_defaults=exclude_defaults, + exclude_none=exclude_none, + ) + yield dict_key, v + + def _calculate_keys( + self, + include: Optional['MappingIntStrAny'], + exclude: Optional['MappingIntStrAny'], + exclude_unset: bool, + update: Optional['DictStrAny'] = None, + ) -> Optional[AbstractSet[str]]: + if include is None and exclude is None and exclude_unset is False: + return None + + keys: AbstractSet[str] + if exclude_unset: + keys = self.__fields_set__.copy() + else: + keys = self.__dict__.keys() + + if include is not None: + keys &= include.keys() + + if update: + keys -= update.keys() + + if exclude: + keys -= {k for k, v in exclude.items() if ValueItems.is_true(v)} + + return keys + + def __eq__(self, other: Any) -> bool: + if isinstance(other, BaseModel): + return self.dict() == other.dict() + else: + return self.dict() == other + + def __repr_args__(self) -> 'ReprArgs': + return [ + (k, v) + for k, v in self.__dict__.items() + if k not in DUNDER_ATTRIBUTES and (k not in self.__fields__ or self.__fields__[k].field_info.repr) + ] + + +_is_base_model_class_defined = True + + +@overload +def create_model( + __model_name: str, + *, + __config__: Optional[Type[BaseConfig]] = None, + __base__: None = None, + __module__: str = __name__, + __validators__: 
+
+
+@overload
+def create_model(
+    __model_name: str,
+    *,
+    __config__: Optional[Type[BaseConfig]] = None,
+    __base__: None = None,
+    __module__: str = __name__,
+    __validators__: Dict[str, 'AnyClassMethod'] = None,
+    __cls_kwargs__: Dict[str, Any] = None,
+    **field_definitions: Any,
+) -> Type['BaseModel']:
+    ...
+
+
+@overload
+def create_model(
+    __model_name: str,
+    *,
+    __config__: Optional[Type[BaseConfig]] = None,
+    __base__: Union[Type['Model'], Tuple[Type['Model'], ...]],
+    __module__: str = __name__,
+    __validators__: Dict[str, 'AnyClassMethod'] = None,
+    __cls_kwargs__: Dict[str, Any] = None,
+    **field_definitions: Any,
+) -> Type['Model']:
+    ...
+
+
+def create_model(
+    __model_name: str,
+    *,
+    __config__: Optional[Type[BaseConfig]] = None,
+    __base__: Union[None, Type['Model'], Tuple[Type['Model'], ...]] = None,
+    __module__: str = __name__,
+    __validators__: Dict[str, 'AnyClassMethod'] = None,
+    __cls_kwargs__: Dict[str, Any] = None,
+    __slots__: Optional[Tuple[str, ...]] = None,
+    **field_definitions: Any,
+) -> Type['Model']:
+    """
+    Dynamically create a model.
+    :param __model_name: name of the created model
+    :param __config__: config class to use for the new model
+    :param __base__: base class for the new model to inherit from
+    :param __module__: module of the created model
+    :param __validators__: a dict of method names and @validator class methods
+    :param __cls_kwargs__: a dict for class creation
+    :param __slots__: Deprecated, `__slots__` should not be passed to `create_model`
+    :param field_definitions: fields of the model (or extra fields if a base is supplied)
+        in the format `<name>=(<type>, <default value>)` or `<name>=<default value>`, e.g.
+        `foobar=(str, ...)` or `foobar=123`, or, for complex use-cases, in the format
+        `<name>=<Field>` or `<name>=(<type>, <FieldInfo>)`, e.g.
+        `foo=Field(datetime, default_factory=datetime.utcnow, alias='bar')` or
+        `foo=(str, FieldInfo(title='Foo'))`
+    """
+    if __slots__ is not None:
+        # __slots__ will be ignored from here on
+        warnings.warn('__slots__ should not be passed to create_model', RuntimeWarning)
+
+    if __base__ is not None:
+        if __config__ is not None:
+            raise ConfigError('to avoid confusion __config__ and __base__ cannot be used together')
+        if not isinstance(__base__, tuple):
+            __base__ = (__base__,)
+    else:
+        __base__ = (cast(Type['Model'], BaseModel),)
+
+    __cls_kwargs__ = __cls_kwargs__ or {}
+
+    fields = {}
+    annotations = {}
+
+    for f_name, f_def in field_definitions.items():
+        if not is_valid_field(f_name):
+            warnings.warn(f'fields may not start with an underscore, ignoring "{f_name}"', RuntimeWarning)
+        if isinstance(f_def, tuple):
+            try:
+                f_annotation, f_value = f_def
+            except ValueError as e:
+                raise ConfigError(
+                    'field definitions should either be a tuple of (<type>, <default>) or just a '
+                    'default value, unfortunately this means tuples as '
+                    'default values are not allowed'
+                ) from e
+        else:
+            f_annotation, f_value = None, f_def
+
+        if f_annotation:
+            annotations[f_name] = f_annotation
+        fields[f_name] = f_value
+
+    namespace: 'DictStrAny' = {'__annotations__': annotations, '__module__': __module__}
+    if __validators__:
+        namespace.update(__validators__)
+    namespace.update(fields)
+    if __config__:
+        namespace['Config'] = inherit_config(__config__, BaseConfig)
+    resolved_bases = resolve_bases(__base__)
+    meta, ns, kwds = prepare_class(__model_name, resolved_bases, kwds=__cls_kwargs__)
+    if resolved_bases is not __base__:
+        ns['__orig_bases__'] = __base__
+    namespace.update(ns)
+    return meta(__model_name, resolved_bases, namespace, **kwds)
+
+
+_missing = object()
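`validate_model` below can also be called directly when errors are wanted as a return value rather than an exception (a sketch; `Point` is a hypothetical model):

from pydantic import BaseModel
from pydantic.main import validate_model

class Point(BaseModel):
    x: int
    y: int = 0

values, fields_set, error = validate_model(Point, {'x': 'not-an-int'})
assert error is not None    # a ValidationError instance, not raised
assert fields_set == {'x'}  # only provided keys; defaults are filled into values
assert values['y'] == 0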
+
+
+def validate_model(  # noqa: C901 (ignore complexity)
+    model: Type[BaseModel], input_data: 'DictStrAny', cls: 'ModelOrDc' = None
+) -> Tuple['DictStrAny', 'SetStr', Optional[ValidationError]]:
+    """
+    Validate data against a model.
+    """
+    values = {}
+    errors = []
+    # input_data names, possibly alias
+    names_used = set()
+    # field names, never aliases
+    fields_set = set()
+    config = model.__config__
+    check_extra = config.extra is not Extra.ignore
+    cls_ = cls or model
+
+    for validator in model.__pre_root_validators__:
+        try:
+            input_data = validator(cls_, input_data)
+        except (ValueError, TypeError, AssertionError) as exc:
+            return {}, set(), ValidationError([ErrorWrapper(exc, loc=ROOT_KEY)], cls_)
+
+    for name, field in model.__fields__.items():
+        value = input_data.get(field.alias, _missing)
+        using_name = False
+        if value is _missing and config.allow_population_by_field_name and field.alt_alias:
+            value = input_data.get(field.name, _missing)
+            using_name = True
+
+        if value is _missing:
+            if field.required:
+                errors.append(ErrorWrapper(MissingError(), loc=field.alias))
+                continue
+
+            value = field.get_default()
+
+            if not config.validate_all and not field.validate_always:
+                values[name] = value
+                continue
+        else:
+            fields_set.add(name)
+            if check_extra:
+                names_used.add(field.name if using_name else field.alias)
+
+        v_, errors_ = field.validate(value, values, loc=field.alias, cls=cls_)
+        if isinstance(errors_, ErrorWrapper):
+            errors.append(errors_)
+        elif isinstance(errors_, list):
+            errors.extend(errors_)
+        else:
+            values[name] = v_
+
+    if check_extra:
+        if isinstance(input_data, GetterDict):
+            extra = input_data.extra_keys() - names_used
+        else:
+            extra = input_data.keys() - names_used
+        if extra:
+            fields_set |= extra
+            if config.extra is Extra.allow:
+                for f in extra:
+                    values[f] = input_data[f]
+            else:
+                for f in sorted(extra):
+                    errors.append(ErrorWrapper(ExtraError(), loc=f))
+
+    for skip_on_failure, validator in model.__post_root_validators__:
+        if skip_on_failure and errors:
+            continue
+        try:
+            values = validator(cls_, values)
+        except (ValueError, TypeError, AssertionError) as exc:
+            errors.append(ErrorWrapper(exc, loc=ROOT_KEY))
+
+    if errors:
+        return values, fields_set, ValidationError(errors, cls_)
+    else:
+        return values, fields_set, None
diff --git a/lib/pydantic/mypy.py b/lib/pydantic/mypy.py
new file mode 100644
index 00000000..6bd9db18
--- /dev/null
+++ b/lib/pydantic/mypy.py
@@ -0,0 +1,850 @@
+import sys
+from configparser import ConfigParser
+from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type as TypingType, Union
+
+from mypy.errorcodes import ErrorCode
+from mypy.nodes import (
+    ARG_NAMED,
+    ARG_NAMED_OPT,
+    ARG_OPT,
+    ARG_POS,
+    ARG_STAR2,
+    MDEF,
+    Argument,
+    AssignmentStmt,
+    Block,
+    CallExpr,
+    ClassDef,
+    Context,
+    Decorator,
+    EllipsisExpr,
+    FuncBase,
+    FuncDef,
+    JsonDict,
+    MemberExpr,
+    NameExpr,
+    PassStmt,
+    PlaceholderNode,
+    RefExpr,
+    StrExpr,
+    SymbolNode,
+    SymbolTableNode,
+    TempNode,
+    TypeInfo,
+    TypeVarExpr,
+    Var,
+)
+from mypy.options import Options
+from mypy.plugin import (
+    CheckerPluginInterface,
+    ClassDefContext,
+    FunctionContext,
+    MethodContext,
+    Plugin,
+    SemanticAnalyzerPluginInterface,
+)
+from mypy.plugins import dataclasses
+from mypy.semanal import set_callable_name  # type: ignore
+from mypy.server.trigger import make_wildcard_trigger
+from mypy.types import (
+    AnyType,
+    CallableType,
+    Instance,
+    NoneType,
+    Overloaded,
+    Type,
+    TypeOfAny,
+    TypeType,
+    TypeVarType,
+    UnionType,
+    get_proper_type,
+)
+from mypy.typevars import fill_typevars
+from mypy.util import get_unique_redefinition_name
+from mypy.version import __version__ as mypy_version
+
+from pydantic.utils import is_valid_field
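# For context: this plugin is enabled through mypy's `plugins` setting, and the
# `[pydantic-mypy]` section read via `CONFIGFILE_KEY` below carries the boolean
# options defined on `PydanticPluginConfig` further down. A sketch of a typical
# mypy.ini (illustrative; not part of this patch):
#
#   [mypy]
#   plugins = pydantic.mypy
#
#   [pydantic-mypy]
#   init_typed = True
#   warn_untyped_fields = True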
+ +try: + from mypy.types import TypeVarDef # type: ignore[attr-defined] +except ImportError: # pragma: no cover + # Backward-compatible with TypeVarDef from Mypy 0.910. + from mypy.types import TypeVarType as TypeVarDef + +CONFIGFILE_KEY = 'pydantic-mypy' +METADATA_KEY = 'pydantic-mypy-metadata' +BASEMODEL_FULLNAME = 'pydantic.main.BaseModel' +BASESETTINGS_FULLNAME = 'pydantic.env_settings.BaseSettings' +FIELD_FULLNAME = 'pydantic.fields.Field' +DATACLASS_FULLNAME = 'pydantic.dataclasses.dataclass' + + +def parse_mypy_version(version: str) -> Tuple[int, ...]: + return tuple(int(part) for part in version.split('+', 1)[0].split('.')) + + +MYPY_VERSION_TUPLE = parse_mypy_version(mypy_version) +BUILTINS_NAME = 'builtins' if MYPY_VERSION_TUPLE >= (0, 930) else '__builtins__' + + +def plugin(version: str) -> 'TypingType[Plugin]': + """ + `version` is the mypy version string + + We might want to use this to print a warning if the mypy version being used is + newer, or especially older, than we expect (or need). + """ + return PydanticPlugin + + +class PydanticPlugin(Plugin): + def __init__(self, options: Options) -> None: + self.plugin_config = PydanticPluginConfig(options) + super().__init__(options) + + def get_base_class_hook(self, fullname: str) -> 'Optional[Callable[[ClassDefContext], None]]': + sym = self.lookup_fully_qualified(fullname) + if sym and isinstance(sym.node, TypeInfo): # pragma: no branch + # No branching may occur if the mypy cache has not been cleared + if any(get_fullname(base) == BASEMODEL_FULLNAME for base in sym.node.mro): + return self._pydantic_model_class_maker_callback + return None + + def get_function_hook(self, fullname: str) -> 'Optional[Callable[[FunctionContext], Type]]': + sym = self.lookup_fully_qualified(fullname) + if sym and sym.fullname == FIELD_FULLNAME: + return self._pydantic_field_callback + return None + + def get_method_hook(self, fullname: str) -> Optional[Callable[[MethodContext], Type]]: + if fullname.endswith('.from_orm'): + return from_orm_callback + return None + + def get_class_decorator_hook(self, fullname: str) -> Optional[Callable[[ClassDefContext], None]]: + if fullname == DATACLASS_FULLNAME: + return dataclasses.dataclass_class_maker_callback # type: ignore[return-value] + return None + + def _pydantic_model_class_maker_callback(self, ctx: ClassDefContext) -> None: + transformer = PydanticModelTransformer(ctx, self.plugin_config) + transformer.transform() + + def _pydantic_field_callback(self, ctx: FunctionContext) -> 'Type': + """ + Extract the type of the `default` argument from the Field function, and use it as the return type. + + In particular: + * Check whether the default and default_factory argument is specified. + * Output an error if both are specified. + * Retrieve the type of the argument which is specified, and use it as return type for the function. 
+ """ + default_any_type = ctx.default_return_type + + assert ctx.callee_arg_names[0] == 'default', '"default" is no longer first argument in Field()' + assert ctx.callee_arg_names[1] == 'default_factory', '"default_factory" is no longer second argument in Field()' + default_args = ctx.args[0] + default_factory_args = ctx.args[1] + + if default_args and default_factory_args: + error_default_and_default_factory_specified(ctx.api, ctx.context) + return default_any_type + + if default_args: + default_type = ctx.arg_types[0][0] + default_arg = default_args[0] + + # Fallback to default Any type if the field is required + if not isinstance(default_arg, EllipsisExpr): + return default_type + + elif default_factory_args: + default_factory_type = ctx.arg_types[1][0] + + # Functions which use `ParamSpec` can be overloaded, exposing the callable's types as a parameter + # Pydantic calls the default factory without any argument, so we retrieve the first item + if isinstance(default_factory_type, Overloaded): + if MYPY_VERSION_TUPLE > (0, 910): + default_factory_type = default_factory_type.items[0] + else: + # Mypy0.910 exposes the items of overloaded types in a function + default_factory_type = default_factory_type.items()[0] # type: ignore[operator] + + if isinstance(default_factory_type, CallableType): + ret_type = default_factory_type.ret_type + # mypy doesn't think `ret_type` has `args`, you'd think mypy should know, + # add this check in case it varies by version + args = getattr(ret_type, 'args', None) + if args: + if all(isinstance(arg, TypeVarType) for arg in args): + # Looks like the default factory is a type like `list` or `dict`, replace all args with `Any` + ret_type.args = tuple(default_any_type for _ in args) # type: ignore[attr-defined] + return ret_type + + return default_any_type + + +class PydanticPluginConfig: + __slots__ = ('init_forbid_extra', 'init_typed', 'warn_required_dynamic_aliases', 'warn_untyped_fields') + init_forbid_extra: bool + init_typed: bool + warn_required_dynamic_aliases: bool + warn_untyped_fields: bool + + def __init__(self, options: Options) -> None: + if options.config_file is None: # pragma: no cover + return + + toml_config = parse_toml(options.config_file) + if toml_config is not None: + config = toml_config.get('tool', {}).get('pydantic-mypy', {}) + for key in self.__slots__: + setting = config.get(key, False) + if not isinstance(setting, bool): + raise ValueError(f'Configuration value must be a boolean for key: {key}') + setattr(self, key, setting) + else: + plugin_config = ConfigParser() + plugin_config.read(options.config_file) + for key in self.__slots__: + setting = plugin_config.getboolean(CONFIGFILE_KEY, key, fallback=False) + setattr(self, key, setting) + + +def from_orm_callback(ctx: MethodContext) -> Type: + """ + Raise an error if orm_mode is not enabled + """ + model_type: Instance + if isinstance(ctx.type, CallableType) and isinstance(ctx.type.ret_type, Instance): + model_type = ctx.type.ret_type # called on the class + elif isinstance(ctx.type, Instance): + model_type = ctx.type # called on an instance (unusual, but still valid) + else: # pragma: no cover + detail = f'ctx.type: {ctx.type} (of type {ctx.type.__class__.__name__})' + error_unexpected_behavior(detail, ctx.api, ctx.context) + return ctx.default_return_type + pydantic_metadata = model_type.type.metadata.get(METADATA_KEY) + if pydantic_metadata is None: + return ctx.default_return_type + orm_mode = pydantic_metadata.get('config', {}).get('orm_mode') + if orm_mode is not True: + 
error_from_orm(get_name(model_type.type), ctx.api, ctx.context) + return ctx.default_return_type + + +class PydanticModelTransformer: + tracked_config_fields: Set[str] = { + 'extra', + 'allow_mutation', + 'frozen', + 'orm_mode', + 'allow_population_by_field_name', + 'alias_generator', + } + + def __init__(self, ctx: ClassDefContext, plugin_config: PydanticPluginConfig) -> None: + self._ctx = ctx + self.plugin_config = plugin_config + + def transform(self) -> None: + """ + Configures the BaseModel subclass according to the plugin settings. + + In particular: + * determines the model config and fields, + * adds a fields-aware signature for the initializer and construct methods + * freezes the class if allow_mutation = False or frozen = True + * stores the fields, config, and if the class is settings in the mypy metadata for access by subclasses + """ + ctx = self._ctx + info = self._ctx.cls.info + + self.adjust_validator_signatures() + config = self.collect_config() + fields = self.collect_fields(config) + for field in fields: + if info[field.name].type is None: + if not ctx.api.final_iteration: + ctx.api.defer() + is_settings = any(get_fullname(base) == BASESETTINGS_FULLNAME for base in info.mro[:-1]) + self.add_initializer(fields, config, is_settings) + self.add_construct_method(fields) + self.set_frozen(fields, frozen=config.allow_mutation is False or config.frozen is True) + info.metadata[METADATA_KEY] = { + 'fields': {field.name: field.serialize() for field in fields}, + 'config': config.set_values_dict(), + } + + def adjust_validator_signatures(self) -> None: + """When we decorate a function `f` with `pydantic.validator(...), mypy sees + `f` as a regular method taking a `self` instance, even though pydantic + internally wraps `f` with `classmethod` if necessary. + + Teach mypy this by marking any function whose outermost decorator is a + `validator()` call as a classmethod. + """ + for name, sym in self._ctx.cls.info.names.items(): + if isinstance(sym.node, Decorator): + first_dec = sym.node.original_decorators[0] + if ( + isinstance(first_dec, CallExpr) + and isinstance(first_dec.callee, NameExpr) + and first_dec.callee.fullname == 'pydantic.class_validators.validator' + ): + sym.node.func.is_class = True + + def collect_config(self) -> 'ModelConfigData': + """ + Collects the values of the config attributes that are used by the plugin, accounting for parent classes. + """ + ctx = self._ctx + cls = ctx.cls + config = ModelConfigData() + for stmt in cls.defs.body: + if not isinstance(stmt, ClassDef): + continue + if stmt.name == 'Config': + for substmt in stmt.defs.body: + if not isinstance(substmt, AssignmentStmt): + continue + config.update(self.get_config_update(substmt)) + if ( + config.has_alias_generator + and not config.allow_population_by_field_name + and self.plugin_config.warn_required_dynamic_aliases + ): + error_required_dynamic_aliases(ctx.api, stmt) + for info in cls.info.mro[1:]: # 0 is the current class + if METADATA_KEY not in info.metadata: + continue + + # Each class depends on the set of fields in its ancestors + ctx.api.add_plugin_dependency(make_wildcard_trigger(get_fullname(info))) + for name, value in info.metadata[METADATA_KEY]['config'].items(): + config.setdefault(name, value) + return config + + def collect_fields(self, model_config: 'ModelConfigData') -> List['PydanticModelField']: + """ + Collects the fields for the model, accounting for parent classes + """ + # First, collect fields belonging to the current class. 
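+        # Editorial sketch of the intended result (hypothetical models, not
+        # upstream code):
+        #
+        #     class A(BaseModel):
+        #         x: int
+        #
+        #     class B(A):
+        #         y: str = 'default'
+        #
+        # Collecting fields for `B` yields `x` (required, inherited) followed by
+        # `y` (optional), since superclass fields are ordered before the
+        # subclass's own fields.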
+ ctx = self._ctx + cls = self._ctx.cls + fields = [] # type: List[PydanticModelField] + known_fields = set() # type: Set[str] + for stmt in cls.defs.body: + if not isinstance(stmt, AssignmentStmt): # `and stmt.new_syntax` to require annotation + continue + + lhs = stmt.lvalues[0] + if not isinstance(lhs, NameExpr) or not is_valid_field(lhs.name): + continue + + if not stmt.new_syntax and self.plugin_config.warn_untyped_fields: + error_untyped_fields(ctx.api, stmt) + + # if lhs.name == '__config__': # BaseConfig not well handled; I'm not sure why yet + # continue + + sym = cls.info.names.get(lhs.name) + if sym is None: # pragma: no cover + # This is likely due to a star import (see the dataclasses plugin for a more detailed explanation) + # This is the same logic used in the dataclasses plugin + continue + + node = sym.node + if isinstance(node, PlaceholderNode): # pragma: no cover + # See the PlaceholderNode docstring for more detail about how this can occur + # Basically, it is an edge case when dealing with complex import logic + # This is the same logic used in the dataclasses plugin + continue + if not isinstance(node, Var): # pragma: no cover + # Don't know if this edge case still happens with the `is_valid_field` check above + # but better safe than sorry + continue + + # x: ClassVar[int] is ignored by dataclasses. + if node.is_classvar: + continue + + is_required = self.get_is_required(cls, stmt, lhs) + alias, has_dynamic_alias = self.get_alias_info(stmt) + if ( + has_dynamic_alias + and not model_config.allow_population_by_field_name + and self.plugin_config.warn_required_dynamic_aliases + ): + error_required_dynamic_aliases(ctx.api, stmt) + fields.append( + PydanticModelField( + name=lhs.name, + is_required=is_required, + alias=alias, + has_dynamic_alias=has_dynamic_alias, + line=stmt.line, + column=stmt.column, + ) + ) + known_fields.add(lhs.name) + all_fields = fields.copy() + for info in cls.info.mro[1:]: # 0 is the current class, -2 is BaseModel, -1 is object + if METADATA_KEY not in info.metadata: + continue + + superclass_fields = [] + # Each class depends on the set of fields in its ancestors + ctx.api.add_plugin_dependency(make_wildcard_trigger(get_fullname(info))) + + for name, data in info.metadata[METADATA_KEY]['fields'].items(): + if name not in known_fields: + field = PydanticModelField.deserialize(info, data) + known_fields.add(name) + superclass_fields.append(field) + else: + (field,) = (a for a in all_fields if a.name == name) + all_fields.remove(field) + superclass_fields.append(field) + all_fields = superclass_fields + all_fields + return all_fields + + def add_initializer(self, fields: List['PydanticModelField'], config: 'ModelConfigData', is_settings: bool) -> None: + """ + Adds a fields-aware `__init__` method to the class. + + The added `__init__` will be annotated with types vs. all `Any` depending on the plugin settings. 
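+
+        Illustrative example (editorial, with a hypothetical model): for a single
+        field `x: int`, the synthesized signature is roughly
+        `def __init__(__pydantic_self__, *, x: int, **kwargs) -> None` when
+        `init_typed = True`, and the same with `x: Any` otherwise.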
+ """ + ctx = self._ctx + typed = self.plugin_config.init_typed + use_alias = config.allow_population_by_field_name is not True + force_all_optional = is_settings or bool( + config.has_alias_generator and not config.allow_population_by_field_name + ) + init_arguments = self.get_field_arguments( + fields, typed=typed, force_all_optional=force_all_optional, use_alias=use_alias + ) + if not self.should_init_forbid_extra(fields, config): + var = Var('kwargs') + init_arguments.append(Argument(var, AnyType(TypeOfAny.explicit), None, ARG_STAR2)) + + if '__init__' not in ctx.cls.info.names: + add_method(ctx, '__init__', init_arguments, NoneType()) + + def add_construct_method(self, fields: List['PydanticModelField']) -> None: + """ + Adds a fully typed `construct` classmethod to the class. + + Similar to the fields-aware __init__ method, but always uses the field names (not aliases), + and does not treat settings fields as optional. + """ + ctx = self._ctx + set_str = ctx.api.named_type(f'{BUILTINS_NAME}.set', [ctx.api.named_type(f'{BUILTINS_NAME}.str')]) + optional_set_str = UnionType([set_str, NoneType()]) + fields_set_argument = Argument(Var('_fields_set', optional_set_str), optional_set_str, None, ARG_OPT) + construct_arguments = self.get_field_arguments(fields, typed=True, force_all_optional=False, use_alias=False) + construct_arguments = [fields_set_argument] + construct_arguments + + obj_type = ctx.api.named_type(f'{BUILTINS_NAME}.object') + self_tvar_name = '_PydanticBaseModel' # Make sure it does not conflict with other names in the class + tvar_fullname = ctx.cls.fullname + '.' + self_tvar_name + tvd = TypeVarDef(self_tvar_name, tvar_fullname, -1, [], obj_type) + self_tvar_expr = TypeVarExpr(self_tvar_name, tvar_fullname, [], obj_type) + ctx.cls.info.names[self_tvar_name] = SymbolTableNode(MDEF, self_tvar_expr) + + # Backward-compatible with TypeVarDef from Mypy 0.910. + if isinstance(tvd, TypeVarType): + self_type = tvd + else: + self_type = TypeVarType(tvd) # type: ignore[call-arg] + + add_method( + ctx, + 'construct', + construct_arguments, + return_type=self_type, + self_type=self_type, + tvar_def=tvd, + is_classmethod=True, + ) + + def set_frozen(self, fields: List['PydanticModelField'], frozen: bool) -> None: + """ + Marks all fields as properties so that attempts to set them trigger mypy errors. + + This is the same approach used by the attrs and dataclasses plugins. + """ + info = self._ctx.cls.info + for field in fields: + sym_node = info.names.get(field.name) + if sym_node is not None: + var = sym_node.node + assert isinstance(var, Var) + var.is_property = frozen + else: + var = field.to_var(info, use_alias=False) + var.info = info + var.is_property = frozen + var._fullname = get_fullname(info) + '.' + get_name(var) + info.names[get_name(var)] = SymbolTableNode(MDEF, var) + + def get_config_update(self, substmt: AssignmentStmt) -> Optional['ModelConfigData']: + """ + Determines the config update due to a single statement in the Config class definition. 
+
+        Warns if a tracked config attribute is set to a value the plugin doesn't know how to interpret (e.g., an int)
+        """
+        lhs = substmt.lvalues[0]
+        if not (isinstance(lhs, NameExpr) and lhs.name in self.tracked_config_fields):
+            return None
+        if lhs.name == 'extra':
+            if isinstance(substmt.rvalue, StrExpr):
+                forbid_extra = substmt.rvalue.value == 'forbid'
+            elif isinstance(substmt.rvalue, MemberExpr):
+                forbid_extra = substmt.rvalue.name == 'forbid'
+            else:
+                error_invalid_config_value(lhs.name, self._ctx.api, substmt)
+                return None
+            return ModelConfigData(forbid_extra=forbid_extra)
+        if lhs.name == 'alias_generator':
+            has_alias_generator = True
+            if isinstance(substmt.rvalue, NameExpr) and substmt.rvalue.fullname == 'builtins.None':
+                has_alias_generator = False
+            return ModelConfigData(has_alias_generator=has_alias_generator)
+        if isinstance(substmt.rvalue, NameExpr) and substmt.rvalue.fullname in ('builtins.True', 'builtins.False'):
+            return ModelConfigData(**{lhs.name: substmt.rvalue.fullname == 'builtins.True'})
+        error_invalid_config_value(lhs.name, self._ctx.api, substmt)
+        return None
+
+    @staticmethod
+    def get_is_required(cls: ClassDef, stmt: AssignmentStmt, lhs: NameExpr) -> bool:
+        """
+        Returns a boolean indicating whether the field defined in `stmt` is a required field.
+        """
+        expr = stmt.rvalue
+        if isinstance(expr, TempNode):
+            # TempNode means annotation-only, so only non-required if Optional
+            value_type = get_proper_type(cls.info[lhs.name].type)
+            if isinstance(value_type, UnionType) and any(isinstance(item, NoneType) for item in value_type.items):
+                # Annotated as Optional, or otherwise having NoneType in the union
+                return False
+            return True
+        if isinstance(expr, CallExpr) and isinstance(expr.callee, RefExpr) and expr.callee.fullname == FIELD_FULLNAME:
+            # The "default value" is a call to `Field`; at this point, the field is
+            # only required if default is Ellipsis (i.e., `field_name: Annotation = Field(...)`) or if default_factory
+            # is specified.
+            for arg, name in zip(expr.args, expr.arg_names):
+                # If name is None, then this arg is the default because it is the only positional argument.
+                if name is None or name == 'default':
+                    return arg.__class__ is EllipsisExpr
+                if name == 'default_factory':
+                    return False
+            return True
+        # Only required if the "default value" is Ellipsis (i.e., `field_name: Annotation = ...`)
+        return isinstance(expr, EllipsisExpr)
+
+    @staticmethod
+    def get_alias_info(stmt: AssignmentStmt) -> Tuple[Optional[str], bool]:
+        """
+        Returns a pair (alias, has_dynamic_alias), extracted from the declaration of the field defined in `stmt`.
+
+        `has_dynamic_alias` is True if and only if an alias is provided, but not as a string literal.
+        If `has_dynamic_alias` is True, `alias` will be None.
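+
+        For example (editorial illustration, `get_alias()` being a hypothetical
+        call): `Field(alias='fullName')` yields `('fullName', False)`, while
+        `Field(alias=get_alias())` yields `(None, True)` because the alias is
+        not a string literal.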
+ """ + expr = stmt.rvalue + if isinstance(expr, TempNode): + # TempNode means annotation-only + return None, False + + if not ( + isinstance(expr, CallExpr) and isinstance(expr.callee, RefExpr) and expr.callee.fullname == FIELD_FULLNAME + ): + # Assigned value is not a call to pydantic.fields.Field + return None, False + + for i, arg_name in enumerate(expr.arg_names): + if arg_name != 'alias': + continue + arg = expr.args[i] + if isinstance(arg, StrExpr): + return arg.value, False + else: + return None, True + return None, False + + def get_field_arguments( + self, fields: List['PydanticModelField'], typed: bool, force_all_optional: bool, use_alias: bool + ) -> List[Argument]: + """ + Helper function used during the construction of the `__init__` and `construct` method signatures. + + Returns a list of mypy Argument instances for use in the generated signatures. + """ + info = self._ctx.cls.info + arguments = [ + field.to_argument(info, typed=typed, force_optional=force_all_optional, use_alias=use_alias) + for field in fields + if not (use_alias and field.has_dynamic_alias) + ] + return arguments + + def should_init_forbid_extra(self, fields: List['PydanticModelField'], config: 'ModelConfigData') -> bool: + """ + Indicates whether the generated `__init__` should get a `**kwargs` at the end of its signature + + We disallow arbitrary kwargs if the extra config setting is "forbid", or if the plugin config says to, + *unless* a required dynamic alias is present (since then we can't determine a valid signature). + """ + if not config.allow_population_by_field_name: + if self.is_dynamic_alias_present(fields, bool(config.has_alias_generator)): + return False + if config.forbid_extra: + return True + return self.plugin_config.init_forbid_extra + + @staticmethod + def is_dynamic_alias_present(fields: List['PydanticModelField'], has_alias_generator: bool) -> bool: + """ + Returns whether any fields on the model have a "dynamic alias", i.e., an alias that cannot be + determined during static analysis. 
+ """ + for field in fields: + if field.has_dynamic_alias: + return True + if has_alias_generator: + for field in fields: + if field.alias is None: + return True + return False + + +class PydanticModelField: + def __init__( + self, name: str, is_required: bool, alias: Optional[str], has_dynamic_alias: bool, line: int, column: int + ): + self.name = name + self.is_required = is_required + self.alias = alias + self.has_dynamic_alias = has_dynamic_alias + self.line = line + self.column = column + + def to_var(self, info: TypeInfo, use_alias: bool) -> Var: + name = self.name + if use_alias and self.alias is not None: + name = self.alias + return Var(name, info[self.name].type) + + def to_argument(self, info: TypeInfo, typed: bool, force_optional: bool, use_alias: bool) -> Argument: + if typed and info[self.name].type is not None: + type_annotation = info[self.name].type + else: + type_annotation = AnyType(TypeOfAny.explicit) + return Argument( + variable=self.to_var(info, use_alias), + type_annotation=type_annotation, + initializer=None, + kind=ARG_NAMED_OPT if force_optional or not self.is_required else ARG_NAMED, + ) + + def serialize(self) -> JsonDict: + return self.__dict__ + + @classmethod + def deserialize(cls, info: TypeInfo, data: JsonDict) -> 'PydanticModelField': + return cls(**data) + + +class ModelConfigData: + def __init__( + self, + forbid_extra: Optional[bool] = None, + allow_mutation: Optional[bool] = None, + frozen: Optional[bool] = None, + orm_mode: Optional[bool] = None, + allow_population_by_field_name: Optional[bool] = None, + has_alias_generator: Optional[bool] = None, + ): + self.forbid_extra = forbid_extra + self.allow_mutation = allow_mutation + self.frozen = frozen + self.orm_mode = orm_mode + self.allow_population_by_field_name = allow_population_by_field_name + self.has_alias_generator = has_alias_generator + + def set_values_dict(self) -> Dict[str, Any]: + return {k: v for k, v in self.__dict__.items() if v is not None} + + def update(self, config: Optional['ModelConfigData']) -> None: + if config is None: + return + for k, v in config.set_values_dict().items(): + setattr(self, k, v) + + def setdefault(self, key: str, value: Any) -> None: + if getattr(self, key) is None: + setattr(self, key, value) + + +ERROR_ORM = ErrorCode('pydantic-orm', 'Invalid from_orm call', 'Pydantic') +ERROR_CONFIG = ErrorCode('pydantic-config', 'Invalid config value', 'Pydantic') +ERROR_ALIAS = ErrorCode('pydantic-alias', 'Dynamic alias disallowed', 'Pydantic') +ERROR_UNEXPECTED = ErrorCode('pydantic-unexpected', 'Unexpected behavior', 'Pydantic') +ERROR_UNTYPED = ErrorCode('pydantic-field', 'Untyped field disallowed', 'Pydantic') +ERROR_FIELD_DEFAULTS = ErrorCode('pydantic-field', 'Invalid Field defaults', 'Pydantic') + + +def error_from_orm(model_name: str, api: CheckerPluginInterface, context: Context) -> None: + api.fail(f'"{model_name}" does not have orm_mode=True', context, code=ERROR_ORM) + + +def error_invalid_config_value(name: str, api: SemanticAnalyzerPluginInterface, context: Context) -> None: + api.fail(f'Invalid value for "Config.{name}"', context, code=ERROR_CONFIG) + + +def error_required_dynamic_aliases(api: SemanticAnalyzerPluginInterface, context: Context) -> None: + api.fail('Required dynamic aliases disallowed', context, code=ERROR_ALIAS) + + +def error_unexpected_behavior(detail: str, api: CheckerPluginInterface, context: Context) -> None: # pragma: no cover + # Can't think of a good way to test this, but I confirmed it renders as desired by adding to a non-error 
path + link = 'https://github.com/pydantic/pydantic/issues/new/choose' + full_message = f'The pydantic mypy plugin ran into unexpected behavior: {detail}\n' + full_message += f'Please consider reporting this bug at {link} so we can try to fix it!' + api.fail(full_message, context, code=ERROR_UNEXPECTED) + + +def error_untyped_fields(api: SemanticAnalyzerPluginInterface, context: Context) -> None: + api.fail('Untyped fields disallowed', context, code=ERROR_UNTYPED) + + +def error_default_and_default_factory_specified(api: CheckerPluginInterface, context: Context) -> None: + api.fail('Field default and default_factory cannot be specified together', context, code=ERROR_FIELD_DEFAULTS) + + +def add_method( + ctx: ClassDefContext, + name: str, + args: List[Argument], + return_type: Type, + self_type: Optional[Type] = None, + tvar_def: Optional[TypeVarDef] = None, + is_classmethod: bool = False, + is_new: bool = False, + # is_staticmethod: bool = False, +) -> None: + """ + Adds a new method to a class. + + This can be dropped if/when https://github.com/python/mypy/issues/7301 is merged + """ + info = ctx.cls.info + + # First remove any previously generated methods with the same name + # to avoid clashes and problems in the semantic analyzer. + if name in info.names: + sym = info.names[name] + if sym.plugin_generated and isinstance(sym.node, FuncDef): + ctx.cls.defs.body.remove(sym.node) # pragma: no cover + + self_type = self_type or fill_typevars(info) + if is_classmethod or is_new: + first = [Argument(Var('_cls'), TypeType.make_normalized(self_type), None, ARG_POS)] + # elif is_staticmethod: + # first = [] + else: + self_type = self_type or fill_typevars(info) + first = [Argument(Var('__pydantic_self__'), self_type, None, ARG_POS)] + args = first + args + arg_types, arg_names, arg_kinds = [], [], [] + for arg in args: + assert arg.type_annotation, 'All arguments must be fully typed.' + arg_types.append(arg.type_annotation) + arg_names.append(get_name(arg.variable)) + arg_kinds.append(arg.kind) + + function_type = ctx.api.named_type(f'{BUILTINS_NAME}.function') + signature = CallableType(arg_types, arg_kinds, arg_names, return_type, function_type) + if tvar_def: + signature.variables = [tvar_def] + + func = FuncDef(name, args, Block([PassStmt()])) + func.info = info + func.type = set_callable_name(signature, func) + func.is_class = is_classmethod + # func.is_static = is_staticmethod + func._fullname = get_fullname(info) + '.' + name + func.line = info.line + + # NOTE: we would like the plugin generated node to dominate, but we still + # need to keep any existing definitions so they get semantically analyzed. + if name in info.names: + # Get a nice unique name instead. + r_name = get_unique_redefinition_name(name, info.names) + info.names[r_name] = info.names[name] + + if is_classmethod: # or is_staticmethod: + func.is_decorated = True + v = Var(name, func.type) + v.info = info + v._fullname = func._fullname + # if is_classmethod: + v.is_classmethod = True + dec = Decorator(func, [NameExpr('classmethod')], v) + # else: + # v.is_staticmethod = True + # dec = Decorator(func, [NameExpr('staticmethod')], v) + + dec.line = info.line + sym = SymbolTableNode(MDEF, dec) + else: + sym = SymbolTableNode(MDEF, func) + sym.plugin_generated = True + + info.names[name] = sym + info.defn.defs.body.append(func) + + +def get_fullname(x: Union[FuncBase, SymbolNode]) -> str: + """ + Used for compatibility with mypy 0.740; can be dropped once support for 0.740 is dropped. 
+ """ + fn = x.fullname + if callable(fn): # pragma: no cover + return fn() + return fn + + +def get_name(x: Union[FuncBase, SymbolNode]) -> str: + """ + Used for compatibility with mypy 0.740; can be dropped once support for 0.740 is dropped. + """ + fn = x.name + if callable(fn): # pragma: no cover + return fn() + return fn + + +def parse_toml(config_file: str) -> Optional[Dict[str, Any]]: + if not config_file.endswith('.toml'): + return None + + read_mode = 'rb' + if sys.version_info >= (3, 11): + import tomllib as toml_ + else: + try: + import tomli as toml_ + except ImportError: + # older versions of mypy have toml as a dependency, not tomli + read_mode = 'r' + try: + import toml as toml_ # type: ignore[no-redef] + except ImportError: # pragma: no cover + import warnings + + warnings.warn('No TOML parser installed, cannot read configuration from `pyproject.toml`.') + return None + + with open(config_file, read_mode) as rf: + return toml_.load(rf) # type: ignore[arg-type] diff --git a/lib/pydantic/networks.py b/lib/pydantic/networks.py new file mode 100644 index 00000000..c7d97186 --- /dev/null +++ b/lib/pydantic/networks.py @@ -0,0 +1,736 @@ +import re +from ipaddress import ( + IPv4Address, + IPv4Interface, + IPv4Network, + IPv6Address, + IPv6Interface, + IPv6Network, + _BaseAddress, + _BaseNetwork, +) +from typing import ( + TYPE_CHECKING, + Any, + Collection, + Dict, + Generator, + List, + Match, + Optional, + Pattern, + Set, + Tuple, + Type, + Union, + cast, + no_type_check, +) + +from . import errors +from .utils import Representation, update_not_none +from .validators import constr_length_validator, str_validator + +if TYPE_CHECKING: + import email_validator + from typing_extensions import TypedDict + + from .config import BaseConfig + from .fields import ModelField + from .typing import AnyCallable + + CallableGenerator = Generator[AnyCallable, None, None] + + class Parts(TypedDict, total=False): + scheme: str + user: Optional[str] + password: Optional[str] + ipv4: Optional[str] + ipv6: Optional[str] + domain: Optional[str] + port: Optional[str] + path: Optional[str] + query: Optional[str] + fragment: Optional[str] + + class HostParts(TypedDict, total=False): + host: str + tld: Optional[str] + host_type: Optional[str] + port: Optional[str] + rebuild: bool + +else: + email_validator = None + + class Parts(dict): + pass + + +NetworkType = Union[str, bytes, int, Tuple[Union[str, bytes, int], Union[str, int]]] + +__all__ = [ + 'AnyUrl', + 'AnyHttpUrl', + 'FileUrl', + 'HttpUrl', + 'stricturl', + 'EmailStr', + 'NameEmail', + 'IPvAnyAddress', + 'IPvAnyInterface', + 'IPvAnyNetwork', + 'PostgresDsn', + 'CockroachDsn', + 'AmqpDsn', + 'RedisDsn', + 'MongoDsn', + 'KafkaDsn', + 'validate_email', +] + +_url_regex_cache = None +_multi_host_url_regex_cache = None +_ascii_domain_regex_cache = None +_int_domain_regex_cache = None +_host_regex_cache = None + +_host_regex = ( + r'(?:' + r'(?P(?:\d{1,3}\.){3}\d{1,3})(?=$|[/:#?])|' # ipv4 + r'(?P\[[A-F0-9]*:[A-F0-9:]+\])(?=$|[/:#?])|' # ipv6 + r'(?P[^\s/:?#]+)' # domain, validation occurs later + r')?' + r'(?::(?P\d+))?' # port +) +_scheme_regex = r'(?:(?P[a-z][a-z0-9+\-.]+)://)?' # scheme https://tools.ietf.org/html/rfc3986#appendix-A +_user_info_regex = r'(?:(?P[^\s:/]*)(?::(?P[^\s/]*))?@)?' +_path_regex = r'(?P/[^\s?#]*)?' +_query_regex = r'(?:\?(?P[^\s#]*))?' +_fragment_regex = r'(?:#(?P[^\s#]*))?' 
+
+
+def url_regex() -> Pattern[str]:
+    global _url_regex_cache
+    if _url_regex_cache is None:
+        _url_regex_cache = re.compile(
+            rf'{_scheme_regex}{_user_info_regex}{_host_regex}{_path_regex}{_query_regex}{_fragment_regex}',
+            re.IGNORECASE,
+        )
+    return _url_regex_cache
+
+
+def multi_host_url_regex() -> Pattern[str]:
+    """
+    Compiled multi-host url regex.
+
+    Extends `url_regex` to also match multiple hosts,
+    e.g. host1.db.net,host2.db.net
+    """
+    global _multi_host_url_regex_cache
+    if _multi_host_url_regex_cache is None:
+        _multi_host_url_regex_cache = re.compile(
+            rf'{_scheme_regex}{_user_info_regex}'
+            r'(?P<hosts>([^/]*))'  # validation occurs later
+            rf'{_path_regex}{_query_regex}{_fragment_regex}',
+            re.IGNORECASE,
+        )
+    return _multi_host_url_regex_cache
+
+
+def ascii_domain_regex() -> Pattern[str]:
+    global _ascii_domain_regex_cache
+    if _ascii_domain_regex_cache is None:
+        ascii_chunk = r'[_0-9a-z](?:[-_0-9a-z]{0,61}[_0-9a-z])?'
+        ascii_domain_ending = r'(?P<tld>\.[a-z]{2,63})?\.?'
+        _ascii_domain_regex_cache = re.compile(
+            fr'(?:{ascii_chunk}\.)*?{ascii_chunk}{ascii_domain_ending}', re.IGNORECASE
+        )
+    return _ascii_domain_regex_cache
+
+
+def int_domain_regex() -> Pattern[str]:
+    global _int_domain_regex_cache
+    if _int_domain_regex_cache is None:
+        int_chunk = r'[_0-9a-\U00040000](?:[-_0-9a-\U00040000]{0,61}[_0-9a-\U00040000])?'
+        int_domain_ending = r'(?P<tld>(\.[^\W\d_]{2,63})|(\.(?:xn--)[_0-9a-z-]{2,63}))?\.?'
+        _int_domain_regex_cache = re.compile(fr'(?:{int_chunk}\.)*?{int_chunk}{int_domain_ending}', re.IGNORECASE)
+    return _int_domain_regex_cache
+
+
+def host_regex() -> Pattern[str]:
+    global _host_regex_cache
+    if _host_regex_cache is None:
+        _host_regex_cache = re.compile(
+            _host_regex,
+            re.IGNORECASE,
+        )
+    return _host_regex_cache
+
+
+class AnyUrl(str):
+    strip_whitespace = True
+    min_length = 1
+    max_length = 2**16
+    allowed_schemes: Optional[Collection[str]] = None
+    tld_required: bool = False
+    user_required: bool = False
+    host_required: bool = True
+    hidden_parts: Set[str] = set()
+
+    __slots__ = ('scheme', 'user', 'password', 'host', 'tld', 'host_type', 'port', 'path', 'query', 'fragment')
+
+    @no_type_check
+    def __new__(cls, url: Optional[str], **kwargs) -> object:
+        return str.__new__(cls, cls.build(**kwargs) if url is None else url)
+
+    def __init__(
+        self,
+        url: str,
+        *,
+        scheme: str,
+        user: Optional[str] = None,
+        password: Optional[str] = None,
+        host: Optional[str] = None,
+        tld: Optional[str] = None,
+        host_type: str = 'domain',
+        port: Optional[str] = None,
+        path: Optional[str] = None,
+        query: Optional[str] = None,
+        fragment: Optional[str] = None,
+    ) -> None:
+        str.__init__(url)
+        self.scheme = scheme
+        self.user = user
+        self.password = password
+        self.host = host
+        self.tld = tld
+        self.host_type = host_type
+        self.port = port
+        self.path = path
+        self.query = query
+        self.fragment = fragment
+
+    @classmethod
+    def build(
+        cls,
+        *,
+        scheme: str,
+        user: Optional[str] = None,
+        password: Optional[str] = None,
+        host: str,
+        port: Optional[str] = None,
+        path: Optional[str] = None,
+        query: Optional[str] = None,
+        fragment: Optional[str] = None,
+        **_kwargs: str,
+    ) -> str:
+        parts = Parts(
+            scheme=scheme,
+            user=user,
+            password=password,
+            host=host,
+            port=port,
+            path=path,
+            query=query,
+            fragment=fragment,
+            **_kwargs,  # type: ignore[misc]
+        )
+
+        url = scheme + '://'
+        if user:
+            url += user
+        if password:
+            url += ':' + password
+        if user or password:
+            url += '@'
+        url += host
+        if port and ('port' not in cls.hidden_parts
or cls.get_default_parts(parts).get('port') != port): + url += ':' + port + if path: + url += path + if query: + url += '?' + query + if fragment: + url += '#' + fragment + return url + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + update_not_none(field_schema, minLength=cls.min_length, maxLength=cls.max_length, format='uri') + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield cls.validate + + @classmethod + def validate(cls, value: Any, field: 'ModelField', config: 'BaseConfig') -> 'AnyUrl': + if value.__class__ == cls: + return value + value = str_validator(value) + if cls.strip_whitespace: + value = value.strip() + url: str = cast(str, constr_length_validator(value, field, config)) + + m = cls._match_url(url) + # the regex should always match, if it doesn't please report with details of the URL tried + assert m, 'URL regex failed unexpectedly' + + original_parts = cast('Parts', m.groupdict()) + parts = cls.apply_default_parts(original_parts) + parts = cls.validate_parts(parts) + + if m.end() != len(url): + raise errors.UrlExtraError(extra=url[m.end() :]) + + return cls._build_url(m, url, parts) + + @classmethod + def _build_url(cls, m: Match[str], url: str, parts: 'Parts') -> 'AnyUrl': + """ + Validate hosts and build the AnyUrl object. Split from `validate` so this method + can be altered in `MultiHostDsn`. + """ + host, tld, host_type, rebuild = cls.validate_host(parts) + + return cls( + None if rebuild else url, + scheme=parts['scheme'], + user=parts['user'], + password=parts['password'], + host=host, + tld=tld, + host_type=host_type, + port=parts['port'], + path=parts['path'], + query=parts['query'], + fragment=parts['fragment'], + ) + + @staticmethod + def _match_url(url: str) -> Optional[Match[str]]: + return url_regex().match(url) + + @staticmethod + def _validate_port(port: Optional[str]) -> None: + if port is not None and int(port) > 65_535: + raise errors.UrlPortError() + + @classmethod + def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts': + """ + A method used to validate parts of a URL. 
+ Could be overridden to set default values for parts if missing + """ + scheme = parts['scheme'] + if scheme is None: + raise errors.UrlSchemeError() + + if cls.allowed_schemes and scheme.lower() not in cls.allowed_schemes: + raise errors.UrlSchemePermittedError(set(cls.allowed_schemes)) + + if validate_port: + cls._validate_port(parts['port']) + + user = parts['user'] + if cls.user_required and user is None: + raise errors.UrlUserInfoError() + + return parts + + @classmethod + def validate_host(cls, parts: 'Parts') -> Tuple[str, Optional[str], str, bool]: + tld, host_type, rebuild = None, None, False + for f in ('domain', 'ipv4', 'ipv6'): + host = parts[f] # type: ignore[literal-required] + if host: + host_type = f + break + + if host is None: + if cls.host_required: + raise errors.UrlHostError() + elif host_type == 'domain': + is_international = False + d = ascii_domain_regex().fullmatch(host) + if d is None: + d = int_domain_regex().fullmatch(host) + if d is None: + raise errors.UrlHostError() + is_international = True + + tld = d.group('tld') + if tld is None and not is_international: + d = int_domain_regex().fullmatch(host) + assert d is not None + tld = d.group('tld') + is_international = True + + if tld is not None: + tld = tld[1:] + elif cls.tld_required: + raise errors.UrlHostTldError() + + if is_international: + host_type = 'int_domain' + rebuild = True + host = host.encode('idna').decode('ascii') + if tld is not None: + tld = tld.encode('idna').decode('ascii') + + return host, tld, host_type, rebuild # type: ignore + + @staticmethod + def get_default_parts(parts: 'Parts') -> 'Parts': + return {} + + @classmethod + def apply_default_parts(cls, parts: 'Parts') -> 'Parts': + for key, value in cls.get_default_parts(parts).items(): + if not parts[key]: # type: ignore[literal-required] + parts[key] = value # type: ignore[literal-required] + return parts + + def __repr__(self) -> str: + extra = ', '.join(f'{n}={getattr(self, n)!r}' for n in self.__slots__ if getattr(self, n) is not None) + return f'{self.__class__.__name__}({super().__repr__()}, {extra})' + + +class AnyHttpUrl(AnyUrl): + allowed_schemes = {'http', 'https'} + + __slots__ = () + + +class HttpUrl(AnyHttpUrl): + tld_required = True + # https://stackoverflow.com/questions/417142/what-is-the-maximum-length-of-a-url-in-different-browsers + max_length = 2083 + hidden_parts = {'port'} + + @staticmethod + def get_default_parts(parts: 'Parts') -> 'Parts': + return {'port': '80' if parts['scheme'] == 'http' else '443'} + + +class FileUrl(AnyUrl): + allowed_schemes = {'file'} + host_required = False + + __slots__ = () + + +class MultiHostDsn(AnyUrl): + __slots__ = AnyUrl.__slots__ + ('hosts',) + + def __init__(self, *args: Any, hosts: Optional[List['HostParts']] = None, **kwargs: Any): + super().__init__(*args, **kwargs) + self.hosts = hosts + + @staticmethod + def _match_url(url: str) -> Optional[Match[str]]: + return multi_host_url_regex().match(url) + + @classmethod + def validate_parts(cls, parts: 'Parts', validate_port: bool = True) -> 'Parts': + return super().validate_parts(parts, validate_port=False) + + @classmethod + def _build_url(cls, m: Match[str], url: str, parts: 'Parts') -> 'MultiHostDsn': + hosts_parts: List['HostParts'] = [] + host_re = host_regex() + for host in m.groupdict()['hosts'].split(','): + d: Parts = host_re.match(host).groupdict() # type: ignore + host, tld, host_type, rebuild = cls.validate_host(d) + port = d.get('port') + cls._validate_port(port) + hosts_parts.append( + { + 'host': host, + 
'host_type': host_type,
+                    'tld': tld,
+                    'rebuild': rebuild,
+                    'port': port,
+                }
+            )
+
+        if len(hosts_parts) > 1:
+            return cls(
+                None if any([hp['rebuild'] for hp in hosts_parts]) else url,
+                scheme=parts['scheme'],
+                user=parts['user'],
+                password=parts['password'],
+                path=parts['path'],
+                query=parts['query'],
+                fragment=parts['fragment'],
+                host_type=None,
+                hosts=hosts_parts,
+            )
+        else:
+            # backwards compatibility with single host
+            host_part = hosts_parts[0]
+            return cls(
+                None if host_part['rebuild'] else url,
+                scheme=parts['scheme'],
+                user=parts['user'],
+                password=parts['password'],
+                host=host_part['host'],
+                tld=host_part['tld'],
+                host_type=host_part['host_type'],
+                port=host_part.get('port'),
+                path=parts['path'],
+                query=parts['query'],
+                fragment=parts['fragment'],
+            )
+
+
+class PostgresDsn(MultiHostDsn):
+    allowed_schemes = {
+        'postgres',
+        'postgresql',
+        'postgresql+asyncpg',
+        'postgresql+pg8000',
+        'postgresql+psycopg2',
+        'postgresql+psycopg2cffi',
+        'postgresql+py-postgresql',
+        'postgresql+pygresql',
+    }
+    user_required = True
+
+    __slots__ = ()
+
+
+class CockroachDsn(AnyUrl):
+    allowed_schemes = {
+        'cockroachdb',
+        'cockroachdb+psycopg2',
+        'cockroachdb+asyncpg',
+    }
+    user_required = True
+
+
+class AmqpDsn(AnyUrl):
+    allowed_schemes = {'amqp', 'amqps'}
+    host_required = False
+
+
+class RedisDsn(AnyUrl):
+    __slots__ = ()
+    allowed_schemes = {'redis', 'rediss'}
+    host_required = False
+
+    @staticmethod
+    def get_default_parts(parts: 'Parts') -> 'Parts':
+        return {
+            'domain': 'localhost' if not (parts['ipv4'] or parts['ipv6']) else '',
+            'port': '6379',
+            'path': '/0',
+        }
+
+
+class MongoDsn(AnyUrl):
+    allowed_schemes = {'mongodb'}
+
+    # TODO: "Parts" needs to be generalized for "Replica Set", "Sharded Cluster", and other MongoDB deployment modes
+    @staticmethod
+    def get_default_parts(parts: 'Parts') -> 'Parts':
+        return {
+            'port': '27017',
+        }
+
+
+class KafkaDsn(AnyUrl):
+    allowed_schemes = {'kafka'}
+
+    @staticmethod
+    def get_default_parts(parts: 'Parts') -> 'Parts':
+        return {
+            'domain': 'localhost',
+            'port': '9092',
+        }
+
+
+def stricturl(
+    *,
+    strip_whitespace: bool = True,
+    min_length: int = 1,
+    max_length: int = 2**16,
+    tld_required: bool = True,
+    host_required: bool = True,
+    allowed_schemes: Optional[Collection[str]] = None,
+) -> Type[AnyUrl]:
+    # use kwargs then define conf in a dict to aid with IDE type hinting
+    namespace = dict(
+        strip_whitespace=strip_whitespace,
+        min_length=min_length,
+        max_length=max_length,
+        tld_required=tld_required,
+        host_required=host_required,
+        allowed_schemes=allowed_schemes,
+    )
+    return type('UrlValue', (AnyUrl,), namespace)
+
+
+def import_email_validator() -> None:
+    global email_validator
+    try:
+        import email_validator
+    except ImportError as e:
+        raise ImportError('email-validator is not installed, run `pip install pydantic[email]`') from e
+
+
+class EmailStr(str):
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        field_schema.update(type='string', format='email')
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        # included here and below so the error happens straight away
+        import_email_validator()
+
+        yield str_validator
+        yield cls.validate
+
+    @classmethod
+    def validate(cls, value: Union[str]) -> str:
+        return validate_email(value)[1]
+
+
+class NameEmail(Representation):
+    __slots__ = 'name', 'email'
+
+    def __init__(self, name: str, email: str):
+        self.name = name
+        self.email = email
+
+    def __eq__(self, other: Any) -> bool:
+        return isinstance(other, NameEmail) and (self.name, self.email) == (other.name, other.email)
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        field_schema.update(type='string', format='name-email')
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        import_email_validator()
+
+        yield cls.validate
+
+    @classmethod
+    def validate(cls, value: Any) -> 'NameEmail':
+        if value.__class__ == cls:
+            return value
+        value = str_validator(value)
+        return cls(*validate_email(value))
+
+    def __str__(self) -> str:
+        return f'{self.name} <{self.email}>'
+
+
+class IPvAnyAddress(_BaseAddress):
+    __slots__ = ()
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        field_schema.update(type='string', format='ipvanyaddress')
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield cls.validate
+
+    @classmethod
+    def validate(cls, value: Union[str, bytes, int]) -> Union[IPv4Address, IPv6Address]:
+        try:
+            return IPv4Address(value)
+        except ValueError:
+            pass
+
+        try:
+            return IPv6Address(value)
+        except ValueError:
+            raise errors.IPvAnyAddressError()
+
+
+class IPvAnyInterface(_BaseAddress):
+    __slots__ = ()
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        field_schema.update(type='string', format='ipvanyinterface')
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield cls.validate
+
+    @classmethod
+    def validate(cls, value: NetworkType) -> Union[IPv4Interface, IPv6Interface]:
+        try:
+            return IPv4Interface(value)
+        except ValueError:
+            pass
+
+        try:
+            return IPv6Interface(value)
+        except ValueError:
+            raise errors.IPvAnyInterfaceError()
+
+
+class IPvAnyNetwork(_BaseNetwork):  # type: ignore
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        field_schema.update(type='string', format='ipvanynetwork')
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield cls.validate
+
+    @classmethod
+    def validate(cls, value: NetworkType) -> Union[IPv4Network, IPv6Network]:
+        # Assume IP Network is defined with a default value for ``strict`` argument.
+        # Define your own class if you want to specify network address check strictness.
+        try:
+            return IPv4Network(value)
+        except ValueError:
+            pass
+
+        try:
+            return IPv6Network(value)
+        except ValueError:
+            raise errors.IPvAnyNetworkError()
+
+
+pretty_email_regex = re.compile(r'([\w ]*?) *<(.*)> *')
+
+
+def validate_email(value: Union[str]) -> Tuple[str, str]:
+    """
+    Brutally simple email address validation. Note that unlike most email address validation:
+    * raw IP address (literal) domain parts are not allowed.
+    * "John Doe <local_part@domain.com>" style "pretty" email addresses are processed
+    * the local part check is extremely basic. This raises the possibility of unicode spoofing, but no better
+      solution is really possible.
+    * spaces are stripped from the beginning and end of addresses, but no error is raised
+
+    See RFC 5322 but treat it with suspicion; there seems to be no universally acknowledged test for a valid email!
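+
+    Illustrative example (editorial, not upstream text):
+    `validate_email('John Doe <jdoe@Example.COM>')` returns
+    `('John Doe', 'jdoe@example.com')`; the display name is preserved, the
+    domain is lowercased, and the local part's case is kept.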
+ """ + if email_validator is None: + import_email_validator() + + m = pretty_email_regex.fullmatch(value) + name: Optional[str] = None + if m: + name, value = m.groups() + + email = value.strip() + + try: + email_validator.validate_email(email, check_deliverability=False) + except email_validator.EmailNotValidError as e: + raise errors.EmailError() from e + + at_index = email.index('@') + local_part = email[:at_index] # RFC 5321, local part must be case-sensitive. + global_part = email[at_index:].lower() + + return name or local_part, local_part + global_part diff --git a/lib/pydantic/parse.py b/lib/pydantic/parse.py new file mode 100644 index 00000000..7ac330ca --- /dev/null +++ b/lib/pydantic/parse.py @@ -0,0 +1,66 @@ +import json +import pickle +from enum import Enum +from pathlib import Path +from typing import Any, Callable, Union + +from .types import StrBytes + + +class Protocol(str, Enum): + json = 'json' + pickle = 'pickle' + + +def load_str_bytes( + b: StrBytes, + *, + content_type: str = None, + encoding: str = 'utf8', + proto: Protocol = None, + allow_pickle: bool = False, + json_loads: Callable[[str], Any] = json.loads, +) -> Any: + if proto is None and content_type: + if content_type.endswith(('json', 'javascript')): + pass + elif allow_pickle and content_type.endswith('pickle'): + proto = Protocol.pickle + else: + raise TypeError(f'Unknown content-type: {content_type}') + + proto = proto or Protocol.json + + if proto == Protocol.json: + if isinstance(b, bytes): + b = b.decode(encoding) + return json_loads(b) + elif proto == Protocol.pickle: + if not allow_pickle: + raise RuntimeError('Trying to decode with pickle with allow_pickle=False') + bb = b if isinstance(b, bytes) else b.encode() + return pickle.loads(bb) + else: + raise TypeError(f'Unknown protocol: {proto}') + + +def load_file( + path: Union[str, Path], + *, + content_type: str = None, + encoding: str = 'utf8', + proto: Protocol = None, + allow_pickle: bool = False, + json_loads: Callable[[str], Any] = json.loads, +) -> Any: + path = Path(path) + b = path.read_bytes() + if content_type is None: + if path.suffix in ('.js', '.json'): + proto = Protocol.json + elif path.suffix == '.pkl': + proto = Protocol.pickle + + return load_str_bytes( + b, proto=proto, content_type=content_type, encoding=encoding, allow_pickle=allow_pickle, json_loads=json_loads + ) diff --git a/lib/pydantic/py.typed b/lib/pydantic/py.typed new file mode 100644 index 00000000..e69de29b diff --git a/lib/pydantic/schema.py b/lib/pydantic/schema.py new file mode 100644 index 00000000..e7af56f1 --- /dev/null +++ b/lib/pydantic/schema.py @@ -0,0 +1,1153 @@ +import re +import warnings +from collections import defaultdict +from datetime import date, datetime, time, timedelta +from decimal import Decimal +from enum import Enum +from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Dict, + ForwardRef, + FrozenSet, + Generic, + Iterable, + List, + Optional, + Pattern, + Sequence, + Set, + Tuple, + Type, + TypeVar, + Union, + cast, +) +from uuid import UUID + +from typing_extensions import Annotated, Literal + +from .fields import ( + MAPPING_LIKE_SHAPES, + SHAPE_DEQUE, + SHAPE_FROZENSET, + SHAPE_GENERIC, + SHAPE_ITERABLE, + SHAPE_LIST, + SHAPE_SEQUENCE, + SHAPE_SET, + SHAPE_SINGLETON, + SHAPE_TUPLE, + SHAPE_TUPLE_ELLIPSIS, + FieldInfo, + ModelField, +) +from .json import pydantic_encoder +from .networks import AnyUrl, 
EmailStr +from .types import ( + ConstrainedDecimal, + ConstrainedFloat, + ConstrainedFrozenSet, + ConstrainedInt, + ConstrainedList, + ConstrainedSet, + SecretBytes, + SecretStr, + StrictBytes, + StrictStr, + conbytes, + condecimal, + confloat, + confrozenset, + conint, + conlist, + conset, + constr, +) +from .typing import ( + all_literal_values, + get_args, + get_origin, + get_sub_types, + is_callable_type, + is_literal_type, + is_namedtuple, + is_none_type, + is_union, +) +from .utils import ROOT_KEY, get_model, lenient_issubclass + +if TYPE_CHECKING: + from .dataclasses import Dataclass + from .main import BaseModel + +default_prefix = '#/definitions/' +default_ref_template = '#/definitions/{model}' + +TypeModelOrEnum = Union[Type['BaseModel'], Type[Enum]] +TypeModelSet = Set[TypeModelOrEnum] + + +def _apply_modify_schema( + modify_schema: Callable[..., None], field: Optional[ModelField], field_schema: Dict[str, Any] +) -> None: + from inspect import signature + + sig = signature(modify_schema) + args = set(sig.parameters.keys()) + if 'field' in args or 'kwargs' in args: + modify_schema(field_schema, field=field) + else: + modify_schema(field_schema) + + +def schema( + models: Sequence[Union[Type['BaseModel'], Type['Dataclass']]], + *, + by_alias: bool = True, + title: Optional[str] = None, + description: Optional[str] = None, + ref_prefix: Optional[str] = None, + ref_template: str = default_ref_template, +) -> Dict[str, Any]: + """ + Process a list of models and generate a single JSON Schema with all of them defined in the ``definitions`` + top-level JSON key, including their sub-models. + + :param models: a list of models to include in the generated JSON Schema + :param by_alias: generate the schemas using the aliases defined, if any + :param title: title for the generated schema that includes the definitions + :param description: description for the generated schema + :param ref_prefix: the JSON Pointer prefix for schema references with ``$ref``, if None, will be set to the + default of ``#/definitions/``. Update it if you want the schemas to reference the definitions somewhere + else, e.g. for OpenAPI use ``#/components/schemas/``. The resulting generated schemas will still be at the + top-level key ``definitions``, so you can extract them from there. But all the references will have the set + prefix. + :param ref_template: Use a ``string.format()`` template for ``$ref`` instead of a prefix. This can be useful + for references that cannot be represented by ``ref_prefix`` such as a definition stored in another file. For + a sibling json file in a ``/schemas`` directory use ``"/schemas/${model}.json#"``. + :return: dict with the JSON Schema with a ``definitions`` top-level key including the schema definitions for + the models and sub-models passed in ``models``. 
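+
+    Illustrative usage (editorial sketch with a hypothetical model)::
+
+        class Foo(BaseModel):
+            a: int
+
+        schema([Foo], title='My Schema')
+        # -> {'title': 'My Schema', 'definitions': {'Foo': {...}}}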
+ """ + clean_models = [get_model(model) for model in models] + flat_models = get_flat_models_from_models(clean_models) + model_name_map = get_model_name_map(flat_models) + definitions = {} + output_schema: Dict[str, Any] = {} + if title: + output_schema['title'] = title + if description: + output_schema['description'] = description + for model in clean_models: + m_schema, m_definitions, m_nested_models = model_process_schema( + model, + by_alias=by_alias, + model_name_map=model_name_map, + ref_prefix=ref_prefix, + ref_template=ref_template, + ) + definitions.update(m_definitions) + model_name = model_name_map[model] + definitions[model_name] = m_schema + if definitions: + output_schema['definitions'] = definitions + return output_schema + + +def model_schema( + model: Union[Type['BaseModel'], Type['Dataclass']], + by_alias: bool = True, + ref_prefix: Optional[str] = None, + ref_template: str = default_ref_template, +) -> Dict[str, Any]: + """ + Generate a JSON Schema for one model. With all the sub-models defined in the ``definitions`` top-level + JSON key. + + :param model: a Pydantic model (a class that inherits from BaseModel) + :param by_alias: generate the schemas using the aliases defined, if any + :param ref_prefix: the JSON Pointer prefix for schema references with ``$ref``, if None, will be set to the + default of ``#/definitions/``. Update it if you want the schemas to reference the definitions somewhere + else, e.g. for OpenAPI use ``#/components/schemas/``. The resulting generated schemas will still be at the + top-level key ``definitions``, so you can extract them from there. But all the references will have the set + prefix. + :param ref_template: Use a ``string.format()`` template for ``$ref`` instead of a prefix. This can be useful for + references that cannot be represented by ``ref_prefix`` such as a definition stored in another file. For a + sibling json file in a ``/schemas`` directory use ``"/schemas/${model}.json#"``. + :return: dict with the JSON Schema for the passed ``model`` + """ + model = get_model(model) + flat_models = get_flat_models_from_model(model) + model_name_map = get_model_name_map(flat_models) + model_name = model_name_map[model] + m_schema, m_definitions, nested_models = model_process_schema( + model, by_alias=by_alias, model_name_map=model_name_map, ref_prefix=ref_prefix, ref_template=ref_template + ) + if model_name in nested_models: + # model_name is in Nested models, it has circular references + m_definitions[model_name] = m_schema + m_schema = get_schema_ref(model_name, ref_prefix, ref_template, False) + if m_definitions: + m_schema.update({'definitions': m_definitions}) + return m_schema + + +def get_field_info_schema(field: ModelField, schema_overrides: bool = False) -> Tuple[Dict[str, Any], bool]: + + # If no title is explicitly set, we don't set title in the schema for enums. + # The behaviour is the same as `BaseModel` reference, where the default title + # is in the definitions part of the schema. 
+ schema_: Dict[str, Any] = {} + if field.field_info.title or not lenient_issubclass(field.type_, Enum): + schema_['title'] = field.field_info.title or field.alias.title().replace('_', ' ') + + if field.field_info.title: + schema_overrides = True + + if field.field_info.description: + schema_['description'] = field.field_info.description + schema_overrides = True + + if not field.required and field.default is not None and not is_callable_type(field.outer_type_): + schema_['default'] = encode_default(field.default) + schema_overrides = True + + return schema_, schema_overrides + + +def field_schema( + field: ModelField, + *, + by_alias: bool = True, + model_name_map: Dict[TypeModelOrEnum, str], + ref_prefix: Optional[str] = None, + ref_template: str = default_ref_template, + known_models: TypeModelSet = None, +) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]: + """ + Process a Pydantic field and return a tuple with a JSON Schema for it as the first item. + Also return a dictionary of definitions with models as keys and their schemas as values. If the passed field + is a model and has sub-models, and those sub-models don't have overrides (as ``title``, ``default``, etc), they + will be included in the definitions and referenced in the schema instead of included recursively. + + :param field: a Pydantic ``ModelField`` + :param by_alias: use the defined alias (if any) in the returned schema + :param model_name_map: used to generate the JSON Schema references to other models included in the definitions + :param ref_prefix: the JSON Pointer prefix to use for references to other schemas, if None, the default of + #/definitions/ will be used + :param ref_template: Use a ``string.format()`` template for ``$ref`` instead of a prefix. This can be useful for + references that cannot be represented by ``ref_prefix`` such as a definition stored in another file. For a + sibling json file in a ``/schemas`` directory use ``"/schemas/${model}.json#"``. + :param known_models: used to solve circular references + :return: tuple of the schema for this field and additional definitions + """ + s, schema_overrides = get_field_info_schema(field) + + validation_schema = get_field_schema_validations(field) + if validation_schema: + s.update(validation_schema) + schema_overrides = True + + f_schema, f_definitions, f_nested_models = field_type_schema( + field, + by_alias=by_alias, + model_name_map=model_name_map, + schema_overrides=schema_overrides, + ref_prefix=ref_prefix, + ref_template=ref_template, + known_models=known_models or set(), + ) + + # $ref will only be returned when there are no schema_overrides + if '$ref' in f_schema: + return f_schema, f_definitions, f_nested_models + else: + s.update(f_schema) + return s, f_definitions, f_nested_models + + +numeric_types = (int, float, Decimal) +_str_types_attrs: Tuple[Tuple[str, Union[type, Tuple[type, ...]], str], ...] = ( + ('max_length', numeric_types, 'maxLength'), + ('min_length', numeric_types, 'minLength'), + ('regex', str, 'pattern'), +) + +_numeric_types_attrs: Tuple[Tuple[str, Union[type, Tuple[type, ...]], str], ...] = ( + ('gt', numeric_types, 'exclusiveMinimum'), + ('lt', numeric_types, 'exclusiveMaximum'), + ('ge', numeric_types, 'minimum'), + ('le', numeric_types, 'maximum'), + ('multiple_of', numeric_types, 'multipleOf'), +) + + +def get_field_schema_validations(field: ModelField) -> Dict[str, Any]: + """ + Get the JSON Schema validation keywords for a ``field`` with an annotation of + a Pydantic ``FieldInfo`` with validation arguments. 
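+
+    For example (editorial illustration): a field declared as
+    `x: str = Field(min_length=2, max_length=5)` contributes
+    `{'minLength': 2, 'maxLength': 5}` to the schema.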
+ """ + f_schema: Dict[str, Any] = {} + + if lenient_issubclass(field.type_, Enum): + # schema is already updated by `enum_process_schema`; just update with field extra + if field.field_info.extra: + f_schema.update(field.field_info.extra) + return f_schema + + if lenient_issubclass(field.type_, (str, bytes)): + for attr_name, t, keyword in _str_types_attrs: + attr = getattr(field.field_info, attr_name, None) + if isinstance(attr, t): + f_schema[keyword] = attr + if lenient_issubclass(field.type_, numeric_types) and not issubclass(field.type_, bool): + for attr_name, t, keyword in _numeric_types_attrs: + attr = getattr(field.field_info, attr_name, None) + if isinstance(attr, t): + f_schema[keyword] = attr + if field.field_info is not None and field.field_info.const: + f_schema['const'] = field.default + if field.field_info.extra: + f_schema.update(field.field_info.extra) + modify_schema = getattr(field.outer_type_, '__modify_schema__', None) + if modify_schema: + _apply_modify_schema(modify_schema, field, f_schema) + return f_schema + + +def get_model_name_map(unique_models: TypeModelSet) -> Dict[TypeModelOrEnum, str]: + """ + Process a set of models and generate unique names for them to be used as keys in the JSON Schema + definitions. By default the names are the same as the class name. But if two models in different Python + modules have the same name (e.g. "users.Model" and "items.Model"), the generated names will be + based on the Python module path for those conflicting models to prevent name collisions. + + :param unique_models: a Python set of models + :return: dict mapping models to names + """ + name_model_map = {} + conflicting_names: Set[str] = set() + for model in unique_models: + model_name = normalize_name(model.__name__) + if model_name in conflicting_names: + model_name = get_long_model_name(model) + name_model_map[model_name] = model + elif model_name in name_model_map: + conflicting_names.add(model_name) + conflicting_model = name_model_map.pop(model_name) + name_model_map[get_long_model_name(conflicting_model)] = conflicting_model + name_model_map[get_long_model_name(model)] = model + else: + name_model_map[model_name] = model + return {v: k for k, v in name_model_map.items()} + + +def get_flat_models_from_model(model: Type['BaseModel'], known_models: TypeModelSet = None) -> TypeModelSet: + """ + Take a single ``model`` and generate a set with itself and all the sub-models in the tree. I.e. if you pass + model ``Foo`` (subclass of Pydantic ``BaseModel``) as ``model``, and it has a field of type ``Bar`` (also + subclass of ``BaseModel``) and that model ``Bar`` has a field of type ``Baz`` (also subclass of ``BaseModel``), + the return value will be ``set([Foo, Bar, Baz])``. + + :param model: a Pydantic ``BaseModel`` subclass + :param known_models: used to solve circular references + :return: a set with the initial model and all its sub-models + """ + known_models = known_models or set() + flat_models: TypeModelSet = set() + flat_models.add(model) + known_models |= flat_models + fields = cast(Sequence[ModelField], model.__fields__.values()) + flat_models |= get_flat_models_from_fields(fields, known_models=known_models) + return flat_models + + +def get_flat_models_from_field(field: ModelField, known_models: TypeModelSet) -> TypeModelSet: + """ + Take a single Pydantic ``ModelField`` (from a model) that could have been declared as a sublcass of BaseModel + (so, it could be a submodel), and generate a set with its model and all the sub-models in the tree. + I.e. 
+
+
+def get_flat_models_from_field(field: ModelField, known_models: TypeModelSet) -> TypeModelSet:
+    """
+    Take a single Pydantic ``ModelField`` (from a model) that could have been declared as a subclass of BaseModel
+    (so, it could be a submodel), and generate a set with its model and all the sub-models in the tree.
+    I.e. if you pass a field that was declared to be of type ``Foo`` (subclass of BaseModel) as ``field``, and that
+    model ``Foo`` has a field of type ``Bar`` (also subclass of ``BaseModel``) and that model ``Bar`` has a field of
+    type ``Baz`` (also subclass of ``BaseModel``), the return value will be ``set([Foo, Bar, Baz])``.
+
+    :param field: a Pydantic ``ModelField``
+    :param known_models: used to solve circular references
+    :return: a set with the model used in the declaration for this field, if any, and all its sub-models
+    """
+    from .main import BaseModel
+
+    flat_models: TypeModelSet = set()
+
+    field_type = field.type_
+    if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), BaseModel):
+        field_type = field_type.__pydantic_model__
+
+    if field.sub_fields and not lenient_issubclass(field_type, BaseModel):
+        flat_models |= get_flat_models_from_fields(field.sub_fields, known_models=known_models)
+    elif lenient_issubclass(field_type, BaseModel) and field_type not in known_models:
+        flat_models |= get_flat_models_from_model(field_type, known_models=known_models)
+    elif lenient_issubclass(field_type, Enum):
+        flat_models.add(field_type)
+    return flat_models
+
+
+def get_flat_models_from_fields(fields: Sequence[ModelField], known_models: TypeModelSet) -> TypeModelSet:
+    """
+    Take a list of Pydantic ``ModelField``s (from a model) that could have been declared as subclasses of
+    ``BaseModel`` (so, any of them could be a submodel), and generate a set with their models and all the
+    sub-models in the tree. I.e. if you pass the fields of a model ``Foo`` (subclass of ``BaseModel``) as
+    ``fields``, and one of them has a field of type ``Bar`` (also subclass of ``BaseModel``) and that model
+    ``Bar`` has a field of type ``Baz`` (also subclass of ``BaseModel``), the return value will be
+    ``set([Foo, Bar, Baz])``.
+
+    :param fields: a list of Pydantic ``ModelField``s
+    :param known_models: used to solve circular references
+    :return: a set with any model declared in the fields, and all their sub-models
+    """
+    flat_models: TypeModelSet = set()
+    for field in fields:
+        flat_models |= get_flat_models_from_field(field, known_models=known_models)
+    return flat_models
+
+
+def get_flat_models_from_models(models: Sequence[Type['BaseModel']]) -> TypeModelSet:
+    """
+    Take a list of ``models`` and generate a set with them and all their sub-models in their trees. I.e. if you pass
+    a list of two models, ``Foo`` and ``Bar``, both subclasses of Pydantic ``BaseModel`` as models, and ``Bar`` has
+    a field of type ``Baz`` (also subclass of ``BaseModel``), the return value will be ``set([Foo, Bar, Baz])``.
+    """
+    flat_models: TypeModelSet = set()
+    for model in models:
+        flat_models |= get_flat_models_from_model(model)
+    return flat_models
+
+
+def get_long_model_name(model: TypeModelOrEnum) -> str:
+    return f'{model.__module__}__{model.__qualname__}'.replace('.', '__')
+
+
+def field_type_schema(
+    field: ModelField,
+    *,
+    by_alias: bool,
+    model_name_map: Dict[TypeModelOrEnum, str],
+    ref_template: str,
+    schema_overrides: bool = False,
+    ref_prefix: Optional[str] = None,
+    known_models: TypeModelSet,
+) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]:
+    """
+    Used by ``field_schema()``; you should probably use that function directly.
+
+    Take a single ``field`` and generate the schema for its type only, not including additional
+    information such as title, etc. Also return additional schema definitions, from sub-models.
+ """ + from .main import BaseModel # noqa: F811 + + definitions = {} + nested_models: Set[str] = set() + f_schema: Dict[str, Any] + if field.shape in { + SHAPE_LIST, + SHAPE_TUPLE_ELLIPSIS, + SHAPE_SEQUENCE, + SHAPE_SET, + SHAPE_FROZENSET, + SHAPE_ITERABLE, + SHAPE_DEQUE, + }: + items_schema, f_definitions, f_nested_models = field_singleton_schema( + field, + by_alias=by_alias, + model_name_map=model_name_map, + ref_prefix=ref_prefix, + ref_template=ref_template, + known_models=known_models, + ) + definitions.update(f_definitions) + nested_models.update(f_nested_models) + f_schema = {'type': 'array', 'items': items_schema} + if field.shape in {SHAPE_SET, SHAPE_FROZENSET}: + f_schema['uniqueItems'] = True + + elif field.shape in MAPPING_LIKE_SHAPES: + f_schema = {'type': 'object'} + key_field = cast(ModelField, field.key_field) + regex = getattr(key_field.type_, 'regex', None) + items_schema, f_definitions, f_nested_models = field_singleton_schema( + field, + by_alias=by_alias, + model_name_map=model_name_map, + ref_prefix=ref_prefix, + ref_template=ref_template, + known_models=known_models, + ) + definitions.update(f_definitions) + nested_models.update(f_nested_models) + if regex: + # Dict keys have a regex pattern + # items_schema might be a schema or empty dict, add it either way + f_schema['patternProperties'] = {regex.pattern: items_schema} + elif items_schema: + # The dict values are not simply Any, so they need a schema + f_schema['additionalProperties'] = items_schema + elif field.shape == SHAPE_TUPLE or (field.shape == SHAPE_GENERIC and not issubclass(field.type_, BaseModel)): + sub_schema = [] + sub_fields = cast(List[ModelField], field.sub_fields) + for sf in sub_fields: + sf_schema, sf_definitions, sf_nested_models = field_type_schema( + sf, + by_alias=by_alias, + model_name_map=model_name_map, + ref_prefix=ref_prefix, + ref_template=ref_template, + known_models=known_models, + ) + definitions.update(sf_definitions) + nested_models.update(sf_nested_models) + sub_schema.append(sf_schema) + + sub_fields_len = len(sub_fields) + if field.shape == SHAPE_GENERIC: + all_of_schemas = sub_schema[0] if sub_fields_len == 1 else {'type': 'array', 'items': sub_schema} + f_schema = {'allOf': [all_of_schemas]} + else: + f_schema = { + 'type': 'array', + 'minItems': sub_fields_len, + 'maxItems': sub_fields_len, + } + if sub_fields_len >= 1: + f_schema['items'] = sub_schema + else: + assert field.shape in {SHAPE_SINGLETON, SHAPE_GENERIC}, field.shape + f_schema, f_definitions, f_nested_models = field_singleton_schema( + field, + by_alias=by_alias, + model_name_map=model_name_map, + schema_overrides=schema_overrides, + ref_prefix=ref_prefix, + ref_template=ref_template, + known_models=known_models, + ) + definitions.update(f_definitions) + nested_models.update(f_nested_models) + + # check field type to avoid repeated calls to the same __modify_schema__ method + if field.type_ != field.outer_type_: + if field.shape == SHAPE_GENERIC: + field_type = field.type_ + else: + field_type = field.outer_type_ + modify_schema = getattr(field_type, '__modify_schema__', None) + if modify_schema: + _apply_modify_schema(modify_schema, field, f_schema) + return f_schema, definitions, nested_models + + +def model_process_schema( + model: TypeModelOrEnum, + *, + by_alias: bool = True, + model_name_map: Dict[TypeModelOrEnum, str], + ref_prefix: Optional[str] = None, + ref_template: str = default_ref_template, + known_models: TypeModelSet = None, + field: Optional[ModelField] = None, +) -> Tuple[Dict[str, Any], 
Dict[str, Any], Set[str]]: + """ + Used by ``model_schema()``, you probably should be using that function. + + Take a single ``model`` and generate its schema. Also return additional schema definitions, from sub-models. The + sub-models of the returned schema will be referenced, but their definitions will not be included in the schema. All + the definitions are returned as the second value. + """ + from inspect import getdoc, signature + + known_models = known_models or set() + if lenient_issubclass(model, Enum): + model = cast(Type[Enum], model) + s = enum_process_schema(model, field=field) + return s, {}, set() + model = cast(Type['BaseModel'], model) + s = {'title': model.__config__.title or model.__name__} + doc = getdoc(model) + if doc: + s['description'] = doc + known_models.add(model) + m_schema, m_definitions, nested_models = model_type_schema( + model, + by_alias=by_alias, + model_name_map=model_name_map, + ref_prefix=ref_prefix, + ref_template=ref_template, + known_models=known_models, + ) + s.update(m_schema) + schema_extra = model.__config__.schema_extra + if callable(schema_extra): + if len(signature(schema_extra).parameters) == 1: + schema_extra(s) + else: + schema_extra(s, model) + else: + s.update(schema_extra) + return s, m_definitions, nested_models + + +def model_type_schema( + model: Type['BaseModel'], + *, + by_alias: bool, + model_name_map: Dict[TypeModelOrEnum, str], + ref_template: str, + ref_prefix: Optional[str] = None, + known_models: TypeModelSet, +) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]: + """ + You probably should be using ``model_schema()``, this function is indirectly used by that function. + + Take a single ``model`` and generate the schema for its type only, not including additional + information as title, etc. Also return additional schema definitions, from sub-models. + """ + properties = {} + required = [] + definitions: Dict[str, Any] = {} + nested_models: Set[str] = set() + for k, f in model.__fields__.items(): + try: + f_schema, f_definitions, f_nested_models = field_schema( + f, + by_alias=by_alias, + model_name_map=model_name_map, + ref_prefix=ref_prefix, + ref_template=ref_template, + known_models=known_models, + ) + except SkipField as skip: + warnings.warn(skip.message, UserWarning) + continue + definitions.update(f_definitions) + nested_models.update(f_nested_models) + if by_alias: + properties[f.alias] = f_schema + if f.required: + required.append(f.alias) + else: + properties[k] = f_schema + if f.required: + required.append(k) + if ROOT_KEY in properties: + out_schema = properties[ROOT_KEY] + out_schema['title'] = model.__config__.title or model.__name__ + else: + out_schema = {'type': 'object', 'properties': properties} + if required: + out_schema['required'] = required + if model.__config__.extra == 'forbid': + out_schema['additionalProperties'] = False + return out_schema, definitions, nested_models + + +def enum_process_schema(enum: Type[Enum], *, field: Optional[ModelField] = None) -> Dict[str, Any]: + """ + Take a single `enum` and generate its schema. + + This is similar to the `model_process_schema` function, but applies to ``Enum`` objects. + """ + schema_: Dict[str, Any] = { + 'title': enum.__name__, + # Python assigns all enums a default docstring value of 'An enumeration', so + # all enums will have a description field even if not explicitly provided. + 'description': enum.__doc__ or 'An enumeration.', + # Add enum values and the enum field type to the schema. 
+ 'enum': [item.value for item in cast(Iterable[Enum], enum)], + } + + add_field_type_to_schema(enum, schema_) + + modify_schema = getattr(enum, '__modify_schema__', None) + if modify_schema: + _apply_modify_schema(modify_schema, field, schema_) + + return schema_ + + +def field_singleton_sub_fields_schema( + field: ModelField, + *, + by_alias: bool, + model_name_map: Dict[TypeModelOrEnum, str], + ref_template: str, + schema_overrides: bool = False, + ref_prefix: Optional[str] = None, + known_models: TypeModelSet, +) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]: + """ + This function is indirectly used by ``field_schema()``, you probably should be using that function. + + Take a list of Pydantic ``ModelField`` from the declaration of a type with parameters, and generate their + schema. I.e., fields used as "type parameters", like ``str`` and ``int`` in ``Tuple[str, int]``. + """ + sub_fields = cast(List[ModelField], field.sub_fields) + definitions = {} + nested_models: Set[str] = set() + if len(sub_fields) == 1: + return field_type_schema( + sub_fields[0], + by_alias=by_alias, + model_name_map=model_name_map, + schema_overrides=schema_overrides, + ref_prefix=ref_prefix, + ref_template=ref_template, + known_models=known_models, + ) + else: + s: Dict[str, Any] = {} + # https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#discriminator-object + field_has_discriminator: bool = field.discriminator_key is not None + if field_has_discriminator: + assert field.sub_fields_mapping is not None + + discriminator_models_refs: Dict[str, Union[str, Dict[str, Any]]] = {} + + for discriminator_value, sub_field in field.sub_fields_mapping.items(): + # sub_field is either a `BaseModel` or directly an `Annotated` `Union` of many + if is_union(get_origin(sub_field.type_)): + sub_models = get_sub_types(sub_field.type_) + discriminator_models_refs[discriminator_value] = { + model_name_map[sub_model]: get_schema_ref( + model_name_map[sub_model], ref_prefix, ref_template, False + ) + for sub_model in sub_models + } + else: + sub_field_type = sub_field.type_ + if hasattr(sub_field_type, '__pydantic_model__'): + sub_field_type = sub_field_type.__pydantic_model__ + + discriminator_model_name = model_name_map[sub_field_type] + discriminator_model_ref = get_schema_ref(discriminator_model_name, ref_prefix, ref_template, False) + discriminator_models_refs[discriminator_value] = discriminator_model_ref['$ref'] + + s['discriminator'] = { + 'propertyName': field.discriminator_alias, + 'mapping': discriminator_models_refs, + } + + sub_field_schemas = [] + for sf in sub_fields: + sub_schema, sub_definitions, sub_nested_models = field_type_schema( + sf, + by_alias=by_alias, + model_name_map=model_name_map, + schema_overrides=schema_overrides, + ref_prefix=ref_prefix, + ref_template=ref_template, + known_models=known_models, + ) + definitions.update(sub_definitions) + if schema_overrides and 'allOf' in sub_schema: + # if the sub_field is a referenced schema we only need the referenced + # object. Otherwise we will end up with several allOf inside anyOf/oneOf. 
+ # See https://github.com/pydantic/pydantic/issues/1209 + sub_schema = sub_schema['allOf'][0] + + if sub_schema.keys() == {'discriminator', 'oneOf'}: + # we don't want discriminator information inside oneOf choices, this is dealt with elsewhere + sub_schema.pop('discriminator') + sub_field_schemas.append(sub_schema) + nested_models.update(sub_nested_models) + s['oneOf' if field_has_discriminator else 'anyOf'] = sub_field_schemas + return s, definitions, nested_models + + +# Order is important, e.g. subclasses of str must go before str +# this is used only for standard library types, custom types should use __modify_schema__ instead +field_class_to_schema: Tuple[Tuple[Any, Dict[str, Any]], ...] = ( + (Path, {'type': 'string', 'format': 'path'}), + (datetime, {'type': 'string', 'format': 'date-time'}), + (date, {'type': 'string', 'format': 'date'}), + (time, {'type': 'string', 'format': 'time'}), + (timedelta, {'type': 'number', 'format': 'time-delta'}), + (IPv4Network, {'type': 'string', 'format': 'ipv4network'}), + (IPv6Network, {'type': 'string', 'format': 'ipv6network'}), + (IPv4Interface, {'type': 'string', 'format': 'ipv4interface'}), + (IPv6Interface, {'type': 'string', 'format': 'ipv6interface'}), + (IPv4Address, {'type': 'string', 'format': 'ipv4'}), + (IPv6Address, {'type': 'string', 'format': 'ipv6'}), + (Pattern, {'type': 'string', 'format': 'regex'}), + (str, {'type': 'string'}), + (bytes, {'type': 'string', 'format': 'binary'}), + (bool, {'type': 'boolean'}), + (int, {'type': 'integer'}), + (float, {'type': 'number'}), + (Decimal, {'type': 'number'}), + (UUID, {'type': 'string', 'format': 'uuid'}), + (dict, {'type': 'object'}), + (list, {'type': 'array', 'items': {}}), + (tuple, {'type': 'array', 'items': {}}), + (set, {'type': 'array', 'items': {}, 'uniqueItems': True}), + (frozenset, {'type': 'array', 'items': {}, 'uniqueItems': True}), +) + +json_scheme = {'type': 'string', 'format': 'json-string'} + + +def add_field_type_to_schema(field_type: Any, schema_: Dict[str, Any]) -> None: + """ + Update the given `schema` with the type-specific metadata for the given `field_type`. + + This function looks through `field_class_to_schema` for a class that matches the given `field_type`, + and then modifies the given `schema` with the information from that type. + """ + for type_, t_schema in field_class_to_schema: + # Fallback for `typing.Pattern` and `re.Pattern` as they are not a valid class + if lenient_issubclass(field_type, type_) or field_type is type_ is Pattern: + schema_.update(t_schema) + break + + +def get_schema_ref(name: str, ref_prefix: Optional[str], ref_template: str, schema_overrides: bool) -> Dict[str, Any]: + if ref_prefix: + schema_ref = {'$ref': ref_prefix + name} + else: + schema_ref = {'$ref': ref_template.format(model=name)} + return {'allOf': [schema_ref]} if schema_overrides else schema_ref + + +def field_singleton_schema( # noqa: C901 (ignore complexity) + field: ModelField, + *, + by_alias: bool, + model_name_map: Dict[TypeModelOrEnum, str], + ref_template: str, + schema_overrides: bool = False, + ref_prefix: Optional[str] = None, + known_models: TypeModelSet, +) -> Tuple[Dict[str, Any], Dict[str, Any], Set[str]]: + """ + This function is indirectly used by ``field_schema()``, you should probably be using that function. + + Take a single Pydantic ``ModelField``, and return its schema and any additional definitions from sub-models. 
+ """ + from .main import BaseModel + + definitions: Dict[str, Any] = {} + nested_models: Set[str] = set() + field_type = field.type_ + + # Recurse into this field if it contains sub_fields and is NOT a + # BaseModel OR that BaseModel is a const + if field.sub_fields and ( + (field.field_info and field.field_info.const) or not lenient_issubclass(field_type, BaseModel) + ): + return field_singleton_sub_fields_schema( + field, + by_alias=by_alias, + model_name_map=model_name_map, + schema_overrides=schema_overrides, + ref_prefix=ref_prefix, + ref_template=ref_template, + known_models=known_models, + ) + if field_type is Any or field_type is object or field_type.__class__ == TypeVar or get_origin(field_type) is type: + return {}, definitions, nested_models # no restrictions + if is_none_type(field_type): + return {'type': 'null'}, definitions, nested_models + if is_callable_type(field_type): + raise SkipField(f'Callable {field.name} was excluded from schema since JSON schema has no equivalent type.') + f_schema: Dict[str, Any] = {} + if field.field_info is not None and field.field_info.const: + f_schema['const'] = field.default + + if is_literal_type(field_type): + values = all_literal_values(field_type) + + if len({v.__class__ for v in values}) > 1: + return field_schema( + multitypes_literal_field_for_schema(values, field), + by_alias=by_alias, + model_name_map=model_name_map, + ref_prefix=ref_prefix, + ref_template=ref_template, + known_models=known_models, + ) + + # All values have the same type + field_type = values[0].__class__ + f_schema['enum'] = list(values) + add_field_type_to_schema(field_type, f_schema) + elif lenient_issubclass(field_type, Enum): + enum_name = model_name_map[field_type] + f_schema, schema_overrides = get_field_info_schema(field, schema_overrides) + f_schema.update(get_schema_ref(enum_name, ref_prefix, ref_template, schema_overrides)) + definitions[enum_name] = enum_process_schema(field_type, field=field) + elif is_namedtuple(field_type): + sub_schema, *_ = model_process_schema( + field_type.__pydantic_model__, + by_alias=by_alias, + model_name_map=model_name_map, + ref_prefix=ref_prefix, + ref_template=ref_template, + known_models=known_models, + field=field, + ) + items_schemas = list(sub_schema['properties'].values()) + f_schema.update( + { + 'type': 'array', + 'items': items_schemas, + 'minItems': len(items_schemas), + 'maxItems': len(items_schemas), + } + ) + elif not hasattr(field_type, '__pydantic_model__'): + add_field_type_to_schema(field_type, f_schema) + + modify_schema = getattr(field_type, '__modify_schema__', None) + if modify_schema: + _apply_modify_schema(modify_schema, field, f_schema) + + if f_schema: + return f_schema, definitions, nested_models + + # Handle dataclass-based models + if lenient_issubclass(getattr(field_type, '__pydantic_model__', None), BaseModel): + field_type = field_type.__pydantic_model__ + + if issubclass(field_type, BaseModel): + model_name = model_name_map[field_type] + if field_type not in known_models: + sub_schema, sub_definitions, sub_nested_models = model_process_schema( + field_type, + by_alias=by_alias, + model_name_map=model_name_map, + ref_prefix=ref_prefix, + ref_template=ref_template, + known_models=known_models, + field=field, + ) + definitions.update(sub_definitions) + definitions[model_name] = sub_schema + nested_models.update(sub_nested_models) + else: + nested_models.add(model_name) + schema_ref = get_schema_ref(model_name, ref_prefix, ref_template, schema_overrides) + return schema_ref, definitions, 
nested_models + + # For generics with no args + args = get_args(field_type) + if args is not None and not args and Generic in field_type.__bases__: + return f_schema, definitions, nested_models + + raise ValueError(f'Value not declarable with JSON Schema, field: {field}') + + +def multitypes_literal_field_for_schema(values: Tuple[Any, ...], field: ModelField) -> ModelField: + """ + To support `Literal` with values of different types, we split it into multiple `Literal` with same type + e.g. `Literal['qwe', 'asd', 1, 2]` becomes `Union[Literal['qwe', 'asd'], Literal[1, 2]]` + """ + literal_distinct_types = defaultdict(list) + for v in values: + literal_distinct_types[v.__class__].append(v) + distinct_literals = (Literal[tuple(same_type_values)] for same_type_values in literal_distinct_types.values()) + + return ModelField( + name=field.name, + type_=Union[tuple(distinct_literals)], # type: ignore + class_validators=field.class_validators, + model_config=field.model_config, + default=field.default, + required=field.required, + alias=field.alias, + field_info=field.field_info, + ) + + +def encode_default(dft: Any) -> Any: + if isinstance(dft, Enum): + return dft.value + elif isinstance(dft, (int, float, str)): + return dft + elif isinstance(dft, (list, tuple)): + t = dft.__class__ + seq_args = (encode_default(v) for v in dft) + return t(*seq_args) if is_namedtuple(t) else t(seq_args) + elif isinstance(dft, dict): + return {encode_default(k): encode_default(v) for k, v in dft.items()} + elif dft is None: + return None + else: + return pydantic_encoder(dft) + + +_map_types_constraint: Dict[Any, Callable[..., type]] = {int: conint, float: confloat, Decimal: condecimal} + + +def get_annotation_from_field_info( + annotation: Any, field_info: FieldInfo, field_name: str, validate_assignment: bool = False +) -> Type[Any]: + """ + Get an annotation with validation implemented for numbers and strings based on the field_info. + :param annotation: an annotation from a field specification, as ``str``, ``ConstrainedStr`` + :param field_info: an instance of FieldInfo, possibly with declarations for validations and JSON Schema + :param field_name: name of the field for use in error messages + :param validate_assignment: default False, flag for BaseModel Config value of validate_assignment + :return: the same ``annotation`` if unmodified or a new annotation with validation in place + """ + constraints = field_info.get_constraints() + used_constraints: Set[str] = set() + if constraints: + annotation, used_constraints = get_annotation_with_constraints(annotation, field_info) + if validate_assignment: + used_constraints.add('allow_mutation') + + unused_constraints = constraints - used_constraints + if unused_constraints: + raise ValueError( + f'On field "{field_name}" the following field constraints are set but not enforced: ' + f'{", ".join(unused_constraints)}. ' + f'\nFor more details see https://pydantic-docs.helpmanual.io/usage/schema/#unenforced-field-constraints' + ) + + return annotation + + +def get_annotation_with_constraints(annotation: Any, field_info: FieldInfo) -> Tuple[Type[Any], Set[str]]: # noqa: C901 + """ + Get an annotation with used constraints implemented for numbers and strings based on the field_info. 
+ + :param annotation: an annotation from a field specification, as ``str``, ``ConstrainedStr`` + :param field_info: an instance of FieldInfo, possibly with declarations for validations and JSON Schema + :return: the same ``annotation`` if unmodified or a new annotation along with the used constraints. + """ + used_constraints: Set[str] = set() + + def go(type_: Any) -> Type[Any]: + if ( + is_literal_type(type_) + or isinstance(type_, ForwardRef) + or lenient_issubclass(type_, (ConstrainedList, ConstrainedSet, ConstrainedFrozenSet)) + ): + return type_ + origin = get_origin(type_) + if origin is not None: + args: Tuple[Any, ...] = get_args(type_) + if any(isinstance(a, ForwardRef) for a in args): + # forward refs cause infinite recursion below + return type_ + + if origin is Annotated: + return go(args[0]) + if is_union(origin): + return Union[tuple(go(a) for a in args)] # type: ignore + + if issubclass(origin, List) and ( + field_info.min_items is not None + or field_info.max_items is not None + or field_info.unique_items is not None + ): + used_constraints.update({'min_items', 'max_items', 'unique_items'}) + return conlist( + go(args[0]), + min_items=field_info.min_items, + max_items=field_info.max_items, + unique_items=field_info.unique_items, + ) + + if issubclass(origin, Set) and (field_info.min_items is not None or field_info.max_items is not None): + used_constraints.update({'min_items', 'max_items'}) + return conset(go(args[0]), min_items=field_info.min_items, max_items=field_info.max_items) + + if issubclass(origin, FrozenSet) and (field_info.min_items is not None or field_info.max_items is not None): + used_constraints.update({'min_items', 'max_items'}) + return confrozenset(go(args[0]), min_items=field_info.min_items, max_items=field_info.max_items) + + for t in (Tuple, List, Set, FrozenSet, Sequence): + if issubclass(origin, t): # type: ignore + return t[tuple(go(a) for a in args)] # type: ignore + + if issubclass(origin, Dict): + return Dict[args[0], go(args[1])] # type: ignore + + attrs: Optional[Tuple[str, ...]] = None + constraint_func: Optional[Callable[..., type]] = None + if isinstance(type_, type): + if issubclass(type_, (SecretStr, SecretBytes)): + attrs = ('max_length', 'min_length') + + def constraint_func(**kw: Any) -> Type[Any]: + return type(type_.__name__, (type_,), kw) + + elif issubclass(type_, str) and not issubclass(type_, (EmailStr, AnyUrl)): + attrs = ('max_length', 'min_length', 'regex') + if issubclass(type_, StrictStr): + + def constraint_func(**kw: Any) -> Type[Any]: + return type(type_.__name__, (type_,), kw) + + else: + constraint_func = constr + elif issubclass(type_, bytes): + attrs = ('max_length', 'min_length', 'regex') + if issubclass(type_, StrictBytes): + + def constraint_func(**kw: Any) -> Type[Any]: + return type(type_.__name__, (type_,), kw) + + else: + constraint_func = conbytes + elif issubclass(type_, numeric_types) and not issubclass( + type_, + ( + ConstrainedInt, + ConstrainedFloat, + ConstrainedDecimal, + ConstrainedList, + ConstrainedSet, + ConstrainedFrozenSet, + bool, + ), + ): + # Is numeric type + attrs = ('gt', 'lt', 'ge', 'le', 'multiple_of') + if issubclass(type_, float): + attrs += ('allow_inf_nan',) + if issubclass(type_, Decimal): + attrs += ('max_digits', 'decimal_places') + numeric_type = next(t for t in numeric_types if issubclass(type_, t)) # pragma: no branch + constraint_func = _map_types_constraint[numeric_type] + + if attrs: + used_constraints.update(set(attrs)) + kwargs = { + attr_name: attr + for attr_name, attr 
in ((attr_name, getattr(field_info, attr_name)) for attr_name in attrs) + if attr is not None + } + if kwargs: + constraint_func = cast(Callable[..., type], constraint_func) + return constraint_func(**kwargs) + return type_ + + return go(annotation), used_constraints + + +def normalize_name(name: str) -> str: + """ + Normalizes the given name. This can be applied to either a model *or* enum. + """ + return re.sub(r'[^a-zA-Z0-9.\-_]', '_', name) + + +class SkipField(Exception): + """ + Utility exception used to exclude fields from schema. + """ + + def __init__(self, message: str) -> None: + self.message = message diff --git a/lib/pydantic/tools.py b/lib/pydantic/tools.py new file mode 100644 index 00000000..9cdb4538 --- /dev/null +++ b/lib/pydantic/tools.py @@ -0,0 +1,92 @@ +import json +from functools import lru_cache +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Optional, Type, TypeVar, Union + +from .parse import Protocol, load_file, load_str_bytes +from .types import StrBytes +from .typing import display_as_type + +__all__ = ('parse_file_as', 'parse_obj_as', 'parse_raw_as', 'schema_of', 'schema_json_of') + +NameFactory = Union[str, Callable[[Type[Any]], str]] + +if TYPE_CHECKING: + from .typing import DictStrAny + + +def _generate_parsing_type_name(type_: Any) -> str: + return f'ParsingModel[{display_as_type(type_)}]' + + +@lru_cache(maxsize=2048) +def _get_parsing_type(type_: Any, *, type_name: Optional[NameFactory] = None) -> Any: + from pydantic.main import create_model + + if type_name is None: + type_name = _generate_parsing_type_name + if not isinstance(type_name, str): + type_name = type_name(type_) + return create_model(type_name, __root__=(type_, ...)) + + +T = TypeVar('T') + + +def parse_obj_as(type_: Type[T], obj: Any, *, type_name: Optional[NameFactory] = None) -> T: + model_type = _get_parsing_type(type_, type_name=type_name) # type: ignore[arg-type] + return model_type(__root__=obj).__root__ + + +def parse_file_as( + type_: Type[T], + path: Union[str, Path], + *, + content_type: str = None, + encoding: str = 'utf8', + proto: Protocol = None, + allow_pickle: bool = False, + json_loads: Callable[[str], Any] = json.loads, + type_name: Optional[NameFactory] = None, +) -> T: + obj = load_file( + path, + proto=proto, + content_type=content_type, + encoding=encoding, + allow_pickle=allow_pickle, + json_loads=json_loads, + ) + return parse_obj_as(type_, obj, type_name=type_name) + + +def parse_raw_as( + type_: Type[T], + b: StrBytes, + *, + content_type: str = None, + encoding: str = 'utf8', + proto: Protocol = None, + allow_pickle: bool = False, + json_loads: Callable[[str], Any] = json.loads, + type_name: Optional[NameFactory] = None, +) -> T: + obj = load_str_bytes( + b, + proto=proto, + content_type=content_type, + encoding=encoding, + allow_pickle=allow_pickle, + json_loads=json_loads, + ) + return parse_obj_as(type_, obj, type_name=type_name) + + +def schema_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_kwargs: Any) -> 'DictStrAny': + """Generate a JSON schema (as dict) for the passed model or dynamically generated one""" + return _get_parsing_type(type_, type_name=title).schema(**schema_kwargs) + + +def schema_json_of(type_: Any, *, title: Optional[NameFactory] = None, **schema_json_kwargs: Any) -> str: + """Generate a JSON schema (as JSON) for the passed model or dynamically generated one""" + return _get_parsing_type(type_, type_name=title).schema_json(**schema_json_kwargs) diff --git a/lib/pydantic/types.py 
b/lib/pydantic/types.py new file mode 100644 index 00000000..f98dba3d --- /dev/null +++ b/lib/pydantic/types.py @@ -0,0 +1,1187 @@ +import abc +import math +import re +import warnings +from datetime import date +from decimal import Decimal +from enum import Enum +from pathlib import Path +from types import new_class +from typing import ( + TYPE_CHECKING, + Any, + Callable, + ClassVar, + Dict, + FrozenSet, + List, + Optional, + Pattern, + Set, + Tuple, + Type, + TypeVar, + Union, + cast, + overload, +) +from uuid import UUID +from weakref import WeakSet + +from . import errors +from .datetime_parse import parse_date +from .utils import import_string, update_not_none +from .validators import ( + bytes_validator, + constr_length_validator, + constr_lower, + constr_strip_whitespace, + constr_upper, + decimal_validator, + float_finite_validator, + float_validator, + frozenset_validator, + int_validator, + list_validator, + number_multiple_validator, + number_size_validator, + path_exists_validator, + path_validator, + set_validator, + str_validator, + strict_bytes_validator, + strict_float_validator, + strict_int_validator, + strict_str_validator, +) + +__all__ = [ + 'NoneStr', + 'NoneBytes', + 'StrBytes', + 'NoneStrBytes', + 'StrictStr', + 'ConstrainedBytes', + 'conbytes', + 'ConstrainedList', + 'conlist', + 'ConstrainedSet', + 'conset', + 'ConstrainedFrozenSet', + 'confrozenset', + 'ConstrainedStr', + 'constr', + 'PyObject', + 'ConstrainedInt', + 'conint', + 'PositiveInt', + 'NegativeInt', + 'NonNegativeInt', + 'NonPositiveInt', + 'ConstrainedFloat', + 'confloat', + 'PositiveFloat', + 'NegativeFloat', + 'NonNegativeFloat', + 'NonPositiveFloat', + 'FiniteFloat', + 'ConstrainedDecimal', + 'condecimal', + 'UUID1', + 'UUID3', + 'UUID4', + 'UUID5', + 'FilePath', + 'DirectoryPath', + 'Json', + 'JsonWrapper', + 'SecretField', + 'SecretStr', + 'SecretBytes', + 'StrictBool', + 'StrictBytes', + 'StrictInt', + 'StrictFloat', + 'PaymentCardNumber', + 'ByteSize', + 'PastDate', + 'FutureDate', + 'ConstrainedDate', + 'condate', +] + +NoneStr = Optional[str] +NoneBytes = Optional[bytes] +StrBytes = Union[str, bytes] +NoneStrBytes = Optional[StrBytes] +OptionalInt = Optional[int] +OptionalIntFloat = Union[OptionalInt, float] +OptionalIntFloatDecimal = Union[OptionalIntFloat, Decimal] +OptionalDate = Optional[date] +StrIntFloat = Union[str, int, float] + +if TYPE_CHECKING: + from typing_extensions import Annotated + + from .dataclasses import Dataclass + from .main import BaseModel + from .typing import CallableGenerator + + ModelOrDc = Type[Union[BaseModel, Dataclass]] + +T = TypeVar('T') +_DEFINED_TYPES: 'WeakSet[type]' = WeakSet() + + +@overload +def _registered(typ: Type[T]) -> Type[T]: + pass + + +@overload +def _registered(typ: 'ConstrainedNumberMeta') -> 'ConstrainedNumberMeta': + pass + + +def _registered(typ: Union[Type[T], 'ConstrainedNumberMeta']) -> Union[Type[T], 'ConstrainedNumberMeta']: + # In order to generate valid examples of constrained types, Hypothesis needs + # to inspect the type object - so we keep a weakref to each contype object + # until it can be registered. When (or if) our Hypothesis plugin is loaded, + # it monkeypatches this function. + # If Hypothesis is never used, the total effect is to keep a weak reference + # which has minimal memory usage and doesn't even affect garbage collection. 
+ _DEFINED_TYPES.add(typ) + return typ + + +class ConstrainedNumberMeta(type): + def __new__(cls, name: str, bases: Any, dct: Dict[str, Any]) -> 'ConstrainedInt': # type: ignore + new_cls = cast('ConstrainedInt', type.__new__(cls, name, bases, dct)) + + if new_cls.gt is not None and new_cls.ge is not None: + raise errors.ConfigError('bounds gt and ge cannot be specified at the same time') + if new_cls.lt is not None and new_cls.le is not None: + raise errors.ConfigError('bounds lt and le cannot be specified at the same time') + + return _registered(new_cls) # type: ignore + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BOOLEAN TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +if TYPE_CHECKING: + StrictBool = bool +else: + + class StrictBool(int): + """ + StrictBool to allow for bools which are not type-coerced. + """ + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + field_schema.update(type='boolean') + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield cls.validate + + @classmethod + def validate(cls, value: Any) -> bool: + """ + Ensure that we only allow bools. + """ + if isinstance(value, bool): + return value + + raise errors.StrictBoolError() + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ INTEGER TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +class ConstrainedInt(int, metaclass=ConstrainedNumberMeta): + strict: bool = False + gt: OptionalInt = None + ge: OptionalInt = None + lt: OptionalInt = None + le: OptionalInt = None + multiple_of: OptionalInt = None + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + update_not_none( + field_schema, + exclusiveMinimum=cls.gt, + exclusiveMaximum=cls.lt, + minimum=cls.ge, + maximum=cls.le, + multipleOf=cls.multiple_of, + ) + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield strict_int_validator if cls.strict else int_validator + yield number_size_validator + yield number_multiple_validator + + +def conint( + *, strict: bool = False, gt: int = None, ge: int = None, lt: int = None, le: int = None, multiple_of: int = None +) -> Type[int]: + # use kwargs then define conf in a dict to aid with IDE type hinting + namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of) + return type('ConstrainedIntValue', (ConstrainedInt,), namespace) + + +if TYPE_CHECKING: + PositiveInt = int + NegativeInt = int + NonPositiveInt = int + NonNegativeInt = int + StrictInt = int +else: + + class PositiveInt(ConstrainedInt): + gt = 0 + + class NegativeInt(ConstrainedInt): + lt = 0 + + class NonPositiveInt(ConstrainedInt): + le = 0 + + class NonNegativeInt(ConstrainedInt): + ge = 0 + + class StrictInt(ConstrainedInt): + strict = True + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FLOAT TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +class ConstrainedFloat(float, metaclass=ConstrainedNumberMeta): + strict: bool = False + gt: OptionalIntFloat = None + ge: OptionalIntFloat = None + lt: OptionalIntFloat = None + le: OptionalIntFloat = None + multiple_of: OptionalIntFloat = None + allow_inf_nan: Optional[bool] = None + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + update_not_none( + field_schema, + exclusiveMinimum=cls.gt, + exclusiveMaximum=cls.lt, + minimum=cls.ge, + maximum=cls.le, + multipleOf=cls.multiple_of, + ) + # Modify constraints to account for differences between IEEE floats and JSON + if field_schema.get('exclusiveMinimum') == -math.inf: + del field_schema['exclusiveMinimum'] + if field_schema.get('minimum') == 
-math.inf: + del field_schema['minimum'] + if field_schema.get('exclusiveMaximum') == math.inf: + del field_schema['exclusiveMaximum'] + if field_schema.get('maximum') == math.inf: + del field_schema['maximum'] + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield strict_float_validator if cls.strict else float_validator + yield number_size_validator + yield number_multiple_validator + yield float_finite_validator + + +def confloat( + *, + strict: bool = False, + gt: float = None, + ge: float = None, + lt: float = None, + le: float = None, + multiple_of: float = None, + allow_inf_nan: Optional[bool] = None, +) -> Type[float]: + # use kwargs then define conf in a dict to aid with IDE type hinting + namespace = dict(strict=strict, gt=gt, ge=ge, lt=lt, le=le, multiple_of=multiple_of, allow_inf_nan=allow_inf_nan) + return type('ConstrainedFloatValue', (ConstrainedFloat,), namespace) + + +if TYPE_CHECKING: + PositiveFloat = float + NegativeFloat = float + NonPositiveFloat = float + NonNegativeFloat = float + StrictFloat = float + FiniteFloat = float +else: + + class PositiveFloat(ConstrainedFloat): + gt = 0 + + class NegativeFloat(ConstrainedFloat): + lt = 0 + + class NonPositiveFloat(ConstrainedFloat): + le = 0 + + class NonNegativeFloat(ConstrainedFloat): + ge = 0 + + class StrictFloat(ConstrainedFloat): + strict = True + + class FiniteFloat(ConstrainedFloat): + allow_inf_nan = False + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BYTES TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +class ConstrainedBytes(bytes): + strip_whitespace = False + to_upper = False + to_lower = False + min_length: OptionalInt = None + max_length: OptionalInt = None + strict: bool = False + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + update_not_none(field_schema, minLength=cls.min_length, maxLength=cls.max_length) + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield strict_bytes_validator if cls.strict else bytes_validator + yield constr_strip_whitespace + yield constr_upper + yield constr_lower + yield constr_length_validator + + +def conbytes( + *, + strip_whitespace: bool = False, + to_upper: bool = False, + to_lower: bool = False, + min_length: int = None, + max_length: int = None, + strict: bool = False, +) -> Type[bytes]: + # use kwargs then define conf in a dict to aid with IDE type hinting + namespace = dict( + strip_whitespace=strip_whitespace, + to_upper=to_upper, + to_lower=to_lower, + min_length=min_length, + max_length=max_length, + strict=strict, + ) + return _registered(type('ConstrainedBytesValue', (ConstrainedBytes,), namespace)) + + +if TYPE_CHECKING: + StrictBytes = bytes +else: + + class StrictBytes(ConstrainedBytes): + strict = True + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ STRING TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + + +class ConstrainedStr(str): + strip_whitespace = False + to_upper = False + to_lower = False + min_length: OptionalInt = None + max_length: OptionalInt = None + curtail_length: OptionalInt = None + regex: Optional[Pattern[str]] = None + strict = False + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + update_not_none( + field_schema, + minLength=cls.min_length, + maxLength=cls.max_length, + pattern=cls.regex and cls.regex.pattern, + ) + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield strict_str_validator if cls.strict else str_validator + yield constr_strip_whitespace + yield constr_upper + yield constr_lower + yield 
constr_length_validator
+        yield cls.validate
+
+    @classmethod
+    def validate(cls, value: Union[str]) -> Union[str]:
+        if cls.curtail_length and len(value) > cls.curtail_length:
+            value = value[: cls.curtail_length]
+
+        if cls.regex:
+            if not cls.regex.match(value):
+                raise errors.StrRegexError(pattern=cls.regex.pattern)
+
+        return value
+
+
+def constr(
+    *,
+    strip_whitespace: bool = False,
+    to_upper: bool = False,
+    to_lower: bool = False,
+    strict: bool = False,
+    min_length: int = None,
+    max_length: int = None,
+    curtail_length: int = None,
+    regex: str = None,
+) -> Type[str]:
+    # use kwargs then define conf in a dict to aid with IDE type hinting
+    namespace = dict(
+        strip_whitespace=strip_whitespace,
+        to_upper=to_upper,
+        to_lower=to_lower,
+        strict=strict,
+        min_length=min_length,
+        max_length=max_length,
+        curtail_length=curtail_length,
+        regex=regex and re.compile(regex),
+    )
+    return _registered(type('ConstrainedStrValue', (ConstrainedStr,), namespace))
+
+
+if TYPE_CHECKING:
+    StrictStr = str
+else:
+
+    class StrictStr(ConstrainedStr):
+        strict = True
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SET TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+# This type's superclass should be Set[T], but cython chokes on that...
+class ConstrainedSet(set):  # type: ignore
+    # Needed for pydantic to detect that this is a set
+    __origin__ = set
+    __args__: Set[Type[T]]  # type: ignore
+
+    min_items: Optional[int] = None
+    max_items: Optional[int] = None
+    item_type: Type[T]  # type: ignore
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield cls.set_length_validator
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items)
+
+    @classmethod
+    def set_length_validator(cls, v: 'Optional[Set[T]]') -> 'Optional[Set[T]]':
+        if v is None:
+            return None
+
+        v = set_validator(v)
+        v_len = len(v)
+
+        if cls.min_items is not None and v_len < cls.min_items:
+            raise errors.SetMinLengthError(limit_value=cls.min_items)
+
+        if cls.max_items is not None and v_len > cls.max_items:
+            raise errors.SetMaxLengthError(limit_value=cls.max_items)
+
+        return v
+
+
+def conset(item_type: Type[T], *, min_items: int = None, max_items: int = None) -> Type[Set[T]]:
+    # __args__ is needed to conform to typing generics api
+    namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': [item_type]}
+    # We use new_class to be able to deal with Generic types
+    return new_class('ConstrainedSetValue', (ConstrainedSet,), {}, lambda ns: ns.update(namespace))
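A short usage sketch for ``conset()`` and the length validator above (the ``Basket`` model and its values are hypothetical, not part of the patch):

    from pydantic import BaseModel, ValidationError, conset

    class Basket(BaseModel):
        tags: conset(str, min_items=1, max_items=3)

    Basket(tags={'new', 'sale'})  # ok: 1 <= len <= 3
    try:
        Basket(tags=set())        # set_length_validator raises SetMinLengthError
    except ValidationError as exc:
        print(exc)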
+
+
+# This type's superclass should be FrozenSet[T], but cython chokes on that...
+class ConstrainedFrozenSet(frozenset):  # type: ignore
+    # Needed for pydantic to detect that this is a set
+    __origin__ = frozenset
+    __args__: FrozenSet[Type[T]]  # type: ignore
+
+    min_items: Optional[int] = None
+    max_items: Optional[int] = None
+    item_type: Type[T]  # type: ignore
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield cls.frozenset_length_validator
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items)
+
+    @classmethod
+    def frozenset_length_validator(cls, v: 'Optional[FrozenSet[T]]') -> 'Optional[FrozenSet[T]]':
+        if v is None:
+            return None
+
+        v = frozenset_validator(v)
+        v_len = len(v)
+
+        if cls.min_items is not None and v_len < cls.min_items:
+            raise errors.FrozenSetMinLengthError(limit_value=cls.min_items)
+
+        if cls.max_items is not None and v_len > cls.max_items:
+            raise errors.FrozenSetMaxLengthError(limit_value=cls.max_items)
+
+        return v
+
+
+def confrozenset(item_type: Type[T], *, min_items: int = None, max_items: int = None) -> Type[FrozenSet[T]]:
+    # __args__ is needed to conform to typing generics api
+    namespace = {'min_items': min_items, 'max_items': max_items, 'item_type': item_type, '__args__': [item_type]}
+    # We use new_class to be able to deal with Generic types
+    return new_class('ConstrainedFrozenSetValue', (ConstrainedFrozenSet,), {}, lambda ns: ns.update(namespace))
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ LIST TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+# This type's superclass should be List[T], but cython chokes on that...
+class ConstrainedList(list):  # type: ignore
+    # Needed for pydantic to detect that this is a list
+    __origin__ = list
+    __args__: Tuple[Type[T], ...]  # type: ignore
+
+    min_items: Optional[int] = None
+    max_items: Optional[int] = None
+    unique_items: Optional[bool] = None
+    item_type: Type[T]  # type: ignore
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield cls.list_length_validator
+        if cls.unique_items:
+            yield cls.unique_items_validator
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        update_not_none(field_schema, minItems=cls.min_items, maxItems=cls.max_items, uniqueItems=cls.unique_items)
+
+    @classmethod
+    def list_length_validator(cls, v: 'Optional[List[T]]') -> 'Optional[List[T]]':
+        if v is None:
+            return None
+
+        v = list_validator(v)
+        v_len = len(v)
+
+        if cls.min_items is not None and v_len < cls.min_items:
+            raise errors.ListMinLengthError(limit_value=cls.min_items)
+
+        if cls.max_items is not None and v_len > cls.max_items:
+            raise errors.ListMaxLengthError(limit_value=cls.max_items)
+
+        return v
+
+    @classmethod
+    def unique_items_validator(cls, v: 'List[T]') -> 'List[T]':
+        for i, value in enumerate(v, start=1):
+            if value in v[i:]:
+                raise errors.ListUniqueItemsError()
+
+        return v
+
+
+def conlist(
+    item_type: Type[T], *, min_items: int = None, max_items: int = None, unique_items: bool = None
+) -> Type[List[T]]:
+    # __args__ is needed to conform to typing generics api
+    namespace = dict(
+        min_items=min_items, max_items=max_items, unique_items=unique_items, item_type=item_type, __args__=(item_type,)
+    )
+    # We use new_class to be able to deal with Generic types
+    return new_class('ConstrainedListValue', (ConstrainedList,), {}, lambda ns: ns.update(namespace))
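Similarly for ``conlist()``; note that ``unique_items_validator`` only runs when ``unique_items`` is set, as in this version of the vendored code (the ``Playlist`` model is hypothetical):

    from pydantic import BaseModel, ValidationError, conlist

    class Playlist(BaseModel):
        track_ids: conlist(int, min_items=1, unique_items=True)

    Playlist(track_ids=[1, 2, 3])   # ok
    try:
        Playlist(track_ids=[1, 1])  # unique_items_validator raises ListUniqueItemsError
    except ValidationError as exc:
        print(exc)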
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PYOBJECT TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+if TYPE_CHECKING:
+    PyObject = Callable[..., Any]
+else:
+
+    class PyObject:
+        validate_always = True
+
+        @classmethod
+        def __get_validators__(cls) -> 'CallableGenerator':
+            yield cls.validate
+
+        @classmethod
+        def validate(cls, value: Any) -> Any:
+            if isinstance(value, Callable):
+                return value
+
+            try:
+                value = str_validator(value)
+            except errors.StrError:
+                raise errors.PyObjectError(error_message='value is neither a valid import path nor a valid callable')
+
+            try:
+                return import_string(value)
+            except ImportError as e:
+                raise errors.PyObjectError(error_message=str(e))
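``PyObject`` accepts either a callable or a dotted import path, resolving the path with ``import_string``. A minimal sketch (the ``Handler`` model is hypothetical):

    from pydantic import BaseModel, PyObject

    class Handler(BaseModel):
        callback: PyObject  # e.g. 'math.cos' is imported and stored as the function itself

    h = Handler(callback='math.cos')
    h.callback(0.0)  # 1.0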
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DECIMAL TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+class ConstrainedDecimal(Decimal, metaclass=ConstrainedNumberMeta):
+    gt: OptionalIntFloatDecimal = None
+    ge: OptionalIntFloatDecimal = None
+    lt: OptionalIntFloatDecimal = None
+    le: OptionalIntFloatDecimal = None
+    max_digits: OptionalInt = None
+    decimal_places: OptionalInt = None
+    multiple_of: OptionalIntFloatDecimal = None
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        update_not_none(
+            field_schema,
+            exclusiveMinimum=cls.gt,
+            exclusiveMaximum=cls.lt,
+            minimum=cls.ge,
+            maximum=cls.le,
+            multipleOf=cls.multiple_of,
+        )
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield decimal_validator
+        yield number_size_validator
+        yield number_multiple_validator
+        yield cls.validate
+
+    @classmethod
+    def validate(cls, value: Decimal) -> Decimal:
+        digit_tuple, exponent = value.as_tuple()[1:]
+        if exponent in {'F', 'n', 'N'}:
+            raise errors.DecimalIsNotFiniteError()
+
+        if exponent >= 0:
+            # A positive exponent adds that many trailing zeros.
+            digits = len(digit_tuple) + exponent
+            decimals = 0
+        else:
+            # If the absolute value of the negative exponent is larger than the
+            # number of digits, then it's the same as the number of digits,
+            # because it'll consume all of the digits in digit_tuple and then
+            # add abs(exponent) - len(digit_tuple) leading zeros after the
+            # decimal point.
+            if abs(exponent) > len(digit_tuple):
+                digits = decimals = abs(exponent)
+            else:
+                digits = len(digit_tuple)
+                decimals = abs(exponent)
+        whole_digits = digits - decimals
+
+        if cls.max_digits is not None and digits > cls.max_digits:
+            raise errors.DecimalMaxDigitsError(max_digits=cls.max_digits)
+
+        if cls.decimal_places is not None and decimals > cls.decimal_places:
+            raise errors.DecimalMaxPlacesError(decimal_places=cls.decimal_places)
+
+        if cls.max_digits is not None and cls.decimal_places is not None:
+            expected = cls.max_digits - cls.decimal_places
+            if whole_digits > expected:
+                raise errors.DecimalWholeDigitsError(whole_digits=expected)
+
+        return value
+
+
+def condecimal(
+    *,
+    gt: Decimal = None,
+    ge: Decimal = None,
+    lt: Decimal = None,
+    le: Decimal = None,
+    max_digits: int = None,
+    decimal_places: int = None,
+    multiple_of: Decimal = None,
+) -> Type[Decimal]:
+    # use kwargs then define conf in a dict to aid with IDE type hinting
+    namespace = dict(
+        gt=gt, ge=ge, lt=lt, le=le, max_digits=max_digits, decimal_places=decimal_places, multiple_of=multiple_of
+    )
+    return type('ConstrainedDecimalValue', (ConstrainedDecimal,), namespace)
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ UUID TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+if TYPE_CHECKING:
+    UUID1 = UUID
+    UUID3 = UUID
+    UUID4 = UUID
+    UUID5 = UUID
+else:
+
+    class UUID1(UUID):
+        _required_version = 1
+
+        @classmethod
+        def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+            field_schema.update(type='string', format=f'uuid{cls._required_version}')
+
+    class UUID3(UUID1):
+        _required_version = 3
+
+    class UUID4(UUID1):
+        _required_version = 4
+
+    class UUID5(UUID1):
+        _required_version = 5
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PATH TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+if TYPE_CHECKING:
+    FilePath = Path
+    DirectoryPath = Path
+else:
+
+    class FilePath(Path):
+        @classmethod
+        def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+            field_schema.update(format='file-path')
+
+        @classmethod
+        def __get_validators__(cls) -> 'CallableGenerator':
+            yield path_validator
+            yield path_exists_validator
+            yield cls.validate
+
+        @classmethod
+        def validate(cls, value: Path) -> Path:
+            if not value.is_file():
+                raise errors.PathNotAFileError(path=value)
+
+            return value
+
+    class DirectoryPath(Path):
+        @classmethod
+        def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+            field_schema.update(format='directory-path')
+
+        @classmethod
+        def __get_validators__(cls) -> 'CallableGenerator':
+            yield path_validator
+            yield path_exists_validator
+            yield cls.validate
+
+        @classmethod
+        def validate(cls, value: Path) -> Path:
+            if not value.is_dir():
+                raise errors.PathNotADirectoryError(path=value)
+
+            return value
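``FilePath`` and ``DirectoryPath`` validate against the local filesystem at parse time, so the paths must already exist. A sketch (the ``Settings`` model and the paths are hypothetical):

    from pydantic import BaseModel, DirectoryPath, FilePath

    class Settings(BaseModel):
        config_file: FilePath     # must exist and be a regular file
        cache_dir: DirectoryPath  # must exist and be a directory

    Settings(config_file='/etc/hosts', cache_dir='/tmp')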
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ JSON TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+class JsonWrapper:
+    pass
+
+
+class JsonMeta(type):
+    def __getitem__(self, t: Type[Any]) -> Type[JsonWrapper]:
+        if t is Any:
+            return Json  # allow Json[Any] to replicate plain Json
+        return _registered(type('JsonWrapperValue', (JsonWrapper,), {'inner_type': t}))
+
+
+if TYPE_CHECKING:
+    Json = Annotated[T, ...]  # Json[list[str]] will be recognized by type checkers as list[str]
+
+else:
+
+    class Json(metaclass=JsonMeta):
+        @classmethod
+        def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+            field_schema.update(type='string', format='json-string')
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SECRET TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+class SecretField(abc.ABC):
+    """
+    Note: this should be implemented as a generic like `SecretField(ABC, Generic[T])`,
+    the `__init__()` should be part of the abstract class and the
+    `get_secret_value()` method should use the generic `T` type.
+
+    However, Cython currently has poor support for generics, and the generated
+    code fails to import (see https://github.com/cython/cython/issues/2753).
+    """
+
+    def __eq__(self, other: Any) -> bool:
+        return isinstance(other, self.__class__) and self.get_secret_value() == other.get_secret_value()
+
+    def __str__(self) -> str:
+        return '**********' if self.get_secret_value() else ''
+
+    def __hash__(self) -> int:
+        return hash(self.get_secret_value())
+
+    @abc.abstractmethod
+    def get_secret_value(self) -> Any:  # pragma: no cover
+        ...
+
+
+class SecretStr(SecretField):
+    min_length: OptionalInt = None
+    max_length: OptionalInt = None
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        update_not_none(
+            field_schema,
+            type='string',
+            writeOnly=True,
+            format='password',
+            minLength=cls.min_length,
+            maxLength=cls.max_length,
+        )
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield cls.validate
+        yield constr_length_validator
+
+    @classmethod
+    def validate(cls, value: Any) -> 'SecretStr':
+        if isinstance(value, cls):
+            return value
+        value = str_validator(value)
+        return cls(value)
+
+    def __init__(self, value: str):
+        self._secret_value = value
+
+    def __repr__(self) -> str:
+        return f"SecretStr('{self}')"
+
+    def __len__(self) -> int:
+        return len(self._secret_value)
+
+    def display(self) -> str:
+        warnings.warn('`secret_str.display()` is deprecated, use `str(secret_str)` instead', DeprecationWarning)
+        return str(self)
+
+    def get_secret_value(self) -> str:
+        return self._secret_value
+
+
+class SecretBytes(SecretField):
+    min_length: OptionalInt = None
+    max_length: OptionalInt = None
+
+    @classmethod
+    def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
+        update_not_none(
+            field_schema,
+            type='string',
+            writeOnly=True,
+            format='password',
+            minLength=cls.min_length,
+            maxLength=cls.max_length,
+        )
+
+    @classmethod
+    def __get_validators__(cls) -> 'CallableGenerator':
+        yield cls.validate
+        yield constr_length_validator
+
+    @classmethod
+    def validate(cls, value: Any) -> 'SecretBytes':
+        if isinstance(value, cls):
+            return value
+        value = bytes_validator(value)
+        return cls(value)
+
+    def __init__(self, value: bytes):
+        self._secret_value = value
+
+    def __repr__(self) -> str:
+        return f"SecretBytes(b'{self}')"
+
+    def __len__(self) -> int:
+        return len(self._secret_value)
+
+    def display(self) -> str:
+        warnings.warn('`secret_bytes.display()` is deprecated, use `str(secret_bytes)` instead', DeprecationWarning)
+        return str(self)
+
+    def get_secret_value(self) -> bytes:
+        return self._secret_value
+
+
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ PAYMENT CARD TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+class PaymentCardBrand(str, Enum):
+    # If you add another card type, please also add it to the
+    # Hypothesis strategy in `pydantic._hypothesis_plugin`.
+ amex = 'American Express' + mastercard = 'Mastercard' + visa = 'Visa' + other = 'other' + + def __str__(self) -> str: + return self.value + + +class PaymentCardNumber(str): + """ + Based on: https://en.wikipedia.org/wiki/Payment_card_number + """ + + strip_whitespace: ClassVar[bool] = True + min_length: ClassVar[int] = 12 + max_length: ClassVar[int] = 19 + bin: str + last4: str + brand: PaymentCardBrand + + def __init__(self, card_number: str): + self.bin = card_number[:6] + self.last4 = card_number[-4:] + self.brand = self._get_brand(card_number) + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield str_validator + yield constr_strip_whitespace + yield constr_length_validator + yield cls.validate_digits + yield cls.validate_luhn_check_digit + yield cls + yield cls.validate_length_for_brand + + @property + def masked(self) -> str: + num_masked = len(self) - 10 # len(bin) + len(last4) == 10 + return f'{self.bin}{"*" * num_masked}{self.last4}' + + @classmethod + def validate_digits(cls, card_number: str) -> str: + if not card_number.isdigit(): + raise errors.NotDigitError + return card_number + + @classmethod + def validate_luhn_check_digit(cls, card_number: str) -> str: + """ + Based on: https://en.wikipedia.org/wiki/Luhn_algorithm + """ + sum_ = int(card_number[-1]) + length = len(card_number) + parity = length % 2 + for i in range(length - 1): + digit = int(card_number[i]) + if i % 2 == parity: + digit *= 2 + if digit > 9: + digit -= 9 + sum_ += digit + valid = sum_ % 10 == 0 + if not valid: + raise errors.LuhnValidationError + return card_number + + @classmethod + def validate_length_for_brand(cls, card_number: 'PaymentCardNumber') -> 'PaymentCardNumber': + """ + Validate length based on BIN for major brands: + https://en.wikipedia.org/wiki/Payment_card_number#Issuer_identification_number_(IIN) + """ + required_length: Union[None, int, str] = None + if card_number.brand in PaymentCardBrand.mastercard: + required_length = 16 + valid = len(card_number) == required_length + elif card_number.brand == PaymentCardBrand.visa: + required_length = '13, 16 or 19' + valid = len(card_number) in {13, 16, 19} + elif card_number.brand == PaymentCardBrand.amex: + required_length = 15 + valid = len(card_number) == required_length + else: + valid = True + if not valid: + raise errors.InvalidLengthForBrand(brand=card_number.brand, required_length=required_length) + return card_number + + @staticmethod + def _get_brand(card_number: str) -> PaymentCardBrand: + if card_number[0] == '4': + brand = PaymentCardBrand.visa + elif 51 <= int(card_number[:2]) <= 55: + brand = PaymentCardBrand.mastercard + elif card_number[:2] in {'34', '37'}: + brand = PaymentCardBrand.amex + else: + brand = PaymentCardBrand.other + return brand + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BYTE SIZE TYPE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +BYTE_SIZES = { + 'b': 1, + 'kb': 10**3, + 'mb': 10**6, + 'gb': 10**9, + 'tb': 10**12, + 'pb': 10**15, + 'eb': 10**18, + 'kib': 2**10, + 'mib': 2**20, + 'gib': 2**30, + 'tib': 2**40, + 'pib': 2**50, + 'eib': 2**60, +} +BYTE_SIZES.update({k.lower()[0]: v for k, v in BYTE_SIZES.items() if 'i' not in k}) +byte_string_re = re.compile(r'^\s*(\d*\.?\d+)\s*(\w+)?', re.IGNORECASE) + + +class ByteSize(int): + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield cls.validate + + @classmethod + def validate(cls, v: StrIntFloat) -> 'ByteSize': + + try: + return cls(int(v)) + except ValueError: + pass + + str_match = byte_string_re.match(str(v)) + if str_match is None: + 
raise errors.InvalidByteSize() + + scalar, unit = str_match.groups() + if unit is None: + unit = 'b' + + try: + unit_mult = BYTE_SIZES[unit.lower()] + except KeyError: + raise errors.InvalidByteSizeUnit(unit=unit) + + return cls(int(float(scalar) * unit_mult)) + + def human_readable(self, decimal: bool = False) -> str: + + if decimal: + divisor = 1000 + units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB'] + final_unit = 'EB' + else: + divisor = 1024 + units = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB'] + final_unit = 'EiB' + + num = float(self) + for unit in units: + if abs(num) < divisor: + return f'{num:0.1f}{unit}' + num /= divisor + + return f'{num:0.1f}{final_unit}' + + def to(self, unit: str) -> float: + + try: + unit_div = BYTE_SIZES[unit.lower()] + except KeyError: + raise errors.InvalidByteSizeUnit(unit=unit) + + return self / unit_div + + +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ DATE TYPES ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +if TYPE_CHECKING: + PastDate = date + FutureDate = date +else: + + class PastDate(date): + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield parse_date + yield cls.validate + + @classmethod + def validate(cls, value: date) -> date: + if value >= date.today(): + raise errors.DateNotInThePastError() + + return value + + class FutureDate(date): + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield parse_date + yield cls.validate + + @classmethod + def validate(cls, value: date) -> date: + if value <= date.today(): + raise errors.DateNotInTheFutureError() + + return value + + +class ConstrainedDate(date, metaclass=ConstrainedNumberMeta): + gt: OptionalDate = None + ge: OptionalDate = None + lt: OptionalDate = None + le: OptionalDate = None + + @classmethod + def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None: + update_not_none(field_schema, exclusiveMinimum=cls.gt, exclusiveMaximum=cls.lt, minimum=cls.ge, maximum=cls.le) + + @classmethod + def __get_validators__(cls) -> 'CallableGenerator': + yield parse_date + yield number_size_validator + + +def condate( + *, + gt: date = None, + ge: date = None, + lt: date = None, + le: date = None, +) -> Type[date]: + # use kwargs then define conf in a dict to aid with IDE type hinting + namespace = dict(gt=gt, ge=ge, lt=lt, le=le) + return type('ConstrainedDateValue', (ConstrainedDate,), namespace) diff --git a/lib/pydantic/typing.py b/lib/pydantic/typing.py new file mode 100644 index 00000000..5ccf266c --- /dev/null +++ b/lib/pydantic/typing.py @@ -0,0 +1,602 @@ +import sys +from collections.abc import Callable +from os import PathLike +from typing import ( # type: ignore + TYPE_CHECKING, + AbstractSet, + Any, + Callable as TypingCallable, + ClassVar, + Dict, + ForwardRef, + Generator, + Iterable, + List, + Mapping, + NewType, + Optional, + Sequence, + Set, + Tuple, + Type, + TypeVar, + Union, + _eval_type, + cast, + get_type_hints, +) + +from typing_extensions import ( + Annotated, + Final, + Literal, + NotRequired as TypedDictNotRequired, + Required as TypedDictRequired, +) + +try: + from typing import _TypingBase as typing_base # type: ignore +except ImportError: + from typing import _Final as typing_base # type: ignore + +try: + from typing import GenericAlias as TypingGenericAlias # type: ignore +except ImportError: + # python < 3.9 does not have GenericAlias (list[int], tuple[str, ...] 
and so on) + TypingGenericAlias = () + +try: + from types import UnionType as TypesUnionType # type: ignore +except ImportError: + # python < 3.10 does not have UnionType (str | int, byte | bool and so on) + TypesUnionType = () + + +if sys.version_info < (3, 9): + + def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any: + return type_._evaluate(globalns, localns) + +else: + + def evaluate_forwardref(type_: ForwardRef, globalns: Any, localns: Any) -> Any: + # Even though it is the right signature for python 3.9, mypy complains with + # `error: Too many arguments for "_evaluate" of "ForwardRef"` hence the cast... + return cast(Any, type_)._evaluate(globalns, localns, set()) + + +if sys.version_info < (3, 9): + # Ensure we always get all the whole `Annotated` hint, not just the annotated type. + # For 3.7 to 3.8, `get_type_hints` doesn't recognize `typing_extensions.Annotated`, + # so it already returns the full annotation + get_all_type_hints = get_type_hints + +else: + + def get_all_type_hints(obj: Any, globalns: Any = None, localns: Any = None) -> Any: + return get_type_hints(obj, globalns, localns, include_extras=True) + + +_T = TypeVar('_T') + +AnyCallable = TypingCallable[..., Any] +NoArgAnyCallable = TypingCallable[[], Any] + +# workaround for https://github.com/python/mypy/issues/9496 +AnyArgTCallable = TypingCallable[..., _T] + + +# Annotated[...] is implemented by returning an instance of one of these classes, depending on +# python/typing_extensions version. +AnnotatedTypeNames = {'AnnotatedMeta', '_AnnotatedAlias'} + + +if sys.version_info < (3, 8): + + def get_origin(t: Type[Any]) -> Optional[Type[Any]]: + if type(t).__name__ in AnnotatedTypeNames: + # weirdly this is a runtime requirement, as well as for mypy + return cast(Type[Any], Annotated) + return getattr(t, '__origin__', None) + +else: + from typing import get_origin as _typing_get_origin + + def get_origin(tp: Type[Any]) -> Optional[Type[Any]]: + """ + We can't directly use `typing.get_origin` since we need a fallback to support + custom generic classes like `ConstrainedList` + It should be useless once https://github.com/cython/cython/issues/3537 is + solved and https://github.com/pydantic/pydantic/pull/1753 is merged. + """ + if type(tp).__name__ in AnnotatedTypeNames: + return cast(Type[Any], Annotated) # mypy complains about _SpecialForm + return _typing_get_origin(tp) or getattr(tp, '__origin__', None) + + +if sys.version_info < (3, 8): + from typing import _GenericAlias + + def get_args(t: Type[Any]) -> Tuple[Any, ...]: + """Compatibility version of get_args for python 3.7. + + Mostly compatible with the python 3.8 `typing` module version + and able to handle almost all use cases. + """ + if type(t).__name__ in AnnotatedTypeNames: + return t.__args__ + t.__metadata__ + if isinstance(t, _GenericAlias): + res = t.__args__ + if t.__origin__ is Callable and res and res[0] is not Ellipsis: + res = (list(res[:-1]), res[-1]) + return res + return getattr(t, '__args__', ()) + +else: + from typing import get_args as _typing_get_args + + def _generic_get_args(tp: Type[Any]) -> Tuple[Any, ...]: + """ + In python 3.9, `typing.Dict`, `typing.List`, ... + do have an empty `__args__` by default (instead of the generic ~T for example). + In order to still support `Dict` for example and consider it as `Dict[Any, Any]`, + we retrieve the `_nparams` value that tells us how many parameters it needs. 
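+
+        An editor's illustration, not upstream code: this relies on the
+        CPython-private ``_nparams`` attribute, so treat it as a sketch whose
+        exact repr may vary across versions:
+
+        >>> _generic_get_args(Dict)  # doctest: +SKIP
+        (typing.Any, typing.Any)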
+ """ + if hasattr(tp, '_nparams'): + return (Any,) * tp._nparams + # Special case for `tuple[()]`, which used to return ((),) with `typing.Tuple` + # in python 3.10- but now returns () for `tuple` and `Tuple`. + # This will probably be clarified in pydantic v2 + try: + if tp == Tuple[()] or sys.version_info >= (3, 9) and tp == tuple[()]: # type: ignore[misc] + return ((),) + # there is a TypeError when compiled with cython + except TypeError: # pragma: no cover + pass + return () + + def get_args(tp: Type[Any]) -> Tuple[Any, ...]: + """Get type arguments with all substitutions performed. + + For unions, basic simplifications used by Union constructor are performed. + Examples:: + get_args(Dict[str, int]) == (str, int) + get_args(int) == () + get_args(Union[int, Union[T, int], str][int]) == (int, str) + get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) + get_args(Callable[[], T][int]) == ([], int) + """ + if type(tp).__name__ in AnnotatedTypeNames: + return tp.__args__ + tp.__metadata__ + # the fallback is needed for the same reasons as `get_origin` (see above) + return _typing_get_args(tp) or getattr(tp, '__args__', ()) or _generic_get_args(tp) + + +if sys.version_info < (3, 9): + + def convert_generics(tp: Type[Any]) -> Type[Any]: + """Python 3.9 and older only supports generics from `typing` module. + They convert strings to ForwardRef automatically. + + Examples:: + typing.List['Hero'] == typing.List[ForwardRef('Hero')] + """ + return tp + +else: + from typing import _UnionGenericAlias # type: ignore + + from typing_extensions import _AnnotatedAlias + + def convert_generics(tp: Type[Any]) -> Type[Any]: + """ + Recursively searches for `str` type hints and replaces them with ForwardRef. + + Examples:: + convert_generics(list['Hero']) == list[ForwardRef('Hero')] + convert_generics(dict['Hero', 'Team']) == dict[ForwardRef('Hero'), ForwardRef('Team')] + convert_generics(typing.Dict['Hero', 'Team']) == typing.Dict[ForwardRef('Hero'), ForwardRef('Team')] + convert_generics(list[str | 'Hero'] | int) == list[str | ForwardRef('Hero')] | int + """ + origin = get_origin(tp) + if not origin or not hasattr(tp, '__args__'): + return tp + + args = get_args(tp) + + # typing.Annotated needs special treatment + if origin is Annotated: + return _AnnotatedAlias(convert_generics(args[0]), args[1:]) + + # recursively replace `str` instances inside of `GenericAlias` with `ForwardRef(arg)` + converted = tuple( + ForwardRef(arg) if isinstance(arg, str) and isinstance(tp, TypingGenericAlias) else convert_generics(arg) + for arg in args + ) + + if converted == args: + return tp + elif isinstance(tp, TypingGenericAlias): + return TypingGenericAlias(origin, converted) + elif isinstance(tp, TypesUnionType): + # recreate types.UnionType (PEP604, Python >= 3.10) + return _UnionGenericAlias(origin, converted) + else: + try: + setattr(tp, '__args__', converted) + except AttributeError: + pass + return tp + + +if sys.version_info < (3, 10): + + def is_union(tp: Optional[Type[Any]]) -> bool: + return tp is Union + + WithArgsTypes = (TypingGenericAlias,) + +else: + import types + import typing + + def is_union(tp: Optional[Type[Any]]) -> bool: + return tp is Union or tp is types.UnionType # noqa: E721 + + WithArgsTypes = (typing._GenericAlias, types.GenericAlias, types.UnionType) + + +if sys.version_info < (3, 9): + StrPath = Union[str, PathLike] +else: + StrPath = Union[str, PathLike] + # TODO: Once we switch to Cython 3 to handle generics properly + # (https://github.com/cython/cython/issues/2753), use 
following lines instead + # of the one above + # # os.PathLike only becomes subscriptable from Python 3.9 onwards + # StrPath = Union[str, PathLike[str]] + + +if TYPE_CHECKING: + from .fields import ModelField + + TupleGenerator = Generator[Tuple[str, Any], None, None] + DictStrAny = Dict[str, Any] + DictAny = Dict[Any, Any] + SetStr = Set[str] + ListStr = List[str] + IntStr = Union[int, str] + AbstractSetIntStr = AbstractSet[IntStr] + DictIntStrAny = Dict[IntStr, Any] + MappingIntStrAny = Mapping[IntStr, Any] + CallableGenerator = Generator[AnyCallable, None, None] + ReprArgs = Sequence[Tuple[Optional[str], Any]] + AnyClassMethod = classmethod[Any] + +__all__ = ( + 'AnyCallable', + 'NoArgAnyCallable', + 'NoneType', + 'is_none_type', + 'display_as_type', + 'resolve_annotations', + 'is_callable_type', + 'is_literal_type', + 'all_literal_values', + 'is_namedtuple', + 'is_typeddict', + 'is_typeddict_special', + 'is_new_type', + 'new_type_supertype', + 'is_classvar', + 'is_finalvar', + 'update_field_forward_refs', + 'update_model_forward_refs', + 'TupleGenerator', + 'DictStrAny', + 'DictAny', + 'SetStr', + 'ListStr', + 'IntStr', + 'AbstractSetIntStr', + 'DictIntStrAny', + 'CallableGenerator', + 'ReprArgs', + 'AnyClassMethod', + 'CallableGenerator', + 'WithArgsTypes', + 'get_args', + 'get_origin', + 'get_sub_types', + 'typing_base', + 'get_all_type_hints', + 'is_union', + 'StrPath', + 'MappingIntStrAny', +) + + +NoneType = None.__class__ + + +NONE_TYPES: Tuple[Any, Any, Any] = (None, NoneType, Literal[None]) + + +if sys.version_info < (3, 8): + # Even though this implementation is slower, we need it for python 3.7: + # In python 3.7 "Literal" is not a builtin type and uses a different + # mechanism. + # for this reason `Literal[None] is Literal[None]` evaluates to `False`, + # breaking the faster implementation used for the other python versions. + + def is_none_type(type_: Any) -> bool: + return type_ in NONE_TYPES + +elif sys.version_info[:2] == (3, 8): + + def is_none_type(type_: Any) -> bool: + for none_type in NONE_TYPES: + if type_ is none_type: + return True + # With python 3.8, specifically 3.8.10, Literal "is" check sare very flakey + # can change on very subtle changes like use of types in other modules, + # hopefully this check avoids that issue. + if is_literal_type(type_): # pragma: no cover + return all_literal_values(type_) == (None,) + return False + +else: + + def is_none_type(type_: Any) -> bool: + for none_type in NONE_TYPES: + if type_ is none_type: + return True + return False + + +def display_as_type(v: Type[Any]) -> str: + if not isinstance(v, typing_base) and not isinstance(v, WithArgsTypes) and not isinstance(v, type): + v = v.__class__ + + if is_union(get_origin(v)): + return f'Union[{", ".join(map(display_as_type, get_args(v)))}]' + + if isinstance(v, WithArgsTypes): + # Generic alias are constructs like `list[int]` + return str(v).replace('typing.', '') + + try: + return v.__name__ + except AttributeError: + # happens with typing objects + return str(v).replace('typing.', '') + + +def resolve_annotations(raw_annotations: Dict[str, Type[Any]], module_name: Optional[str]) -> Dict[str, Type[Any]]: + """ + Partially taken from typing.get_type_hints. + + Resolve string or ForwardRef annotations into type objects if possible. 
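+
+    An editor's sketch of the intent (assumes the named module is importable;
+    the annotation dict is a hypothetical example):
+
+    >>> resolve_annotations({'x': 'int'}, 'builtins')['x'] is int
+    True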
+ """ + base_globals: Optional[Dict[str, Any]] = None + if module_name: + try: + module = sys.modules[module_name] + except KeyError: + # happens occasionally, see https://github.com/pydantic/pydantic/issues/2363 + pass + else: + base_globals = module.__dict__ + + annotations = {} + for name, value in raw_annotations.items(): + if isinstance(value, str): + if (3, 10) > sys.version_info >= (3, 9, 8) or sys.version_info >= (3, 10, 1): + value = ForwardRef(value, is_argument=False, is_class=True) + else: + value = ForwardRef(value, is_argument=False) + try: + value = _eval_type(value, base_globals, None) + except NameError: + # this is ok, it can be fixed with update_forward_refs + pass + annotations[name] = value + return annotations + + +def is_callable_type(type_: Type[Any]) -> bool: + return type_ is Callable or get_origin(type_) is Callable + + +def is_literal_type(type_: Type[Any]) -> bool: + return Literal is not None and get_origin(type_) is Literal + + +def literal_values(type_: Type[Any]) -> Tuple[Any, ...]: + return get_args(type_) + + +def all_literal_values(type_: Type[Any]) -> Tuple[Any, ...]: + """ + This method is used to retrieve all Literal values as + Literal can be used recursively (see https://www.python.org/dev/peps/pep-0586) + e.g. `Literal[Literal[Literal[1, 2, 3], "foo"], 5, None]` + """ + if not is_literal_type(type_): + return (type_,) + + values = literal_values(type_) + return tuple(x for value in values for x in all_literal_values(value)) + + +def is_namedtuple(type_: Type[Any]) -> bool: + """ + Check if a given class is a named tuple. + It can be either a `typing.NamedTuple` or `collections.namedtuple` + """ + from .utils import lenient_issubclass + + return lenient_issubclass(type_, tuple) and hasattr(type_, '_fields') + + +def is_typeddict(type_: Type[Any]) -> bool: + """ + Check if a given class is a typed dict (from `typing` or `typing_extensions`) + In 3.10, there will be a public method (https://docs.python.org/3.10/library/typing.html#typing.is_typeddict) + """ + from .utils import lenient_issubclass + + return lenient_issubclass(type_, dict) and hasattr(type_, '__total__') + + +def _check_typeddict_special(type_: Any) -> bool: + return type_ is TypedDictRequired or type_ is TypedDictNotRequired + + +def is_typeddict_special(type_: Any) -> bool: + """ + Check if type is a TypedDict special form (Required or NotRequired). + """ + return _check_typeddict_special(type_) or _check_typeddict_special(get_origin(type_)) + + +test_type = NewType('test_type', str) + + +def is_new_type(type_: Type[Any]) -> bool: + """ + Check whether type_ was created using typing.NewType + """ + return isinstance(type_, test_type.__class__) and hasattr(type_, '__supertype__') # type: ignore + + +def new_type_supertype(type_: Type[Any]) -> Type[Any]: + while hasattr(type_, '__supertype__'): + type_ = type_.__supertype__ + return type_ + + +def _check_classvar(v: Optional[Type[Any]]) -> bool: + if v is None: + return False + + return v.__class__ == ClassVar.__class__ and getattr(v, '_name', None) == 'ClassVar' + + +def _check_finalvar(v: Optional[Type[Any]]) -> bool: + """ + Check if a given type is a `typing.Final` type. 
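+
+    Editor's illustration (bare ``Final`` only; the subscripted form such as
+    ``Final[int]`` is caught by ``is_finalvar`` through ``get_origin``):
+
+    >>> _check_finalvar(Final)
+    True
+    >>> _check_finalvar(int)
+    False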
+ """ + if v is None: + return False + + return v.__class__ == Final.__class__ and (sys.version_info < (3, 8) or getattr(v, '_name', None) == 'Final') + + +def is_classvar(ann_type: Type[Any]) -> bool: + if _check_classvar(ann_type) or _check_classvar(get_origin(ann_type)): + return True + + # this is an ugly workaround for class vars that contain forward references and are therefore themselves + # forward references, see #3679 + if ann_type.__class__ == ForwardRef and ann_type.__forward_arg__.startswith('ClassVar['): + return True + + return False + + +def is_finalvar(ann_type: Type[Any]) -> bool: + return _check_finalvar(ann_type) or _check_finalvar(get_origin(ann_type)) + + +def update_field_forward_refs(field: 'ModelField', globalns: Any, localns: Any) -> None: + """ + Try to update ForwardRefs on fields based on this ModelField, globalns and localns. + """ + prepare = False + if field.type_.__class__ == ForwardRef: + prepare = True + field.type_ = evaluate_forwardref(field.type_, globalns, localns or None) + if field.outer_type_.__class__ == ForwardRef: + prepare = True + field.outer_type_ = evaluate_forwardref(field.outer_type_, globalns, localns or None) + if prepare: + field.prepare() + + if field.sub_fields: + for sub_f in field.sub_fields: + update_field_forward_refs(sub_f, globalns=globalns, localns=localns) + + if field.discriminator_key is not None: + field.prepare_discriminated_union_sub_fields() + + +def update_model_forward_refs( + model: Type[Any], + fields: Iterable['ModelField'], + json_encoders: Dict[Union[Type[Any], str, ForwardRef], AnyCallable], + localns: 'DictStrAny', + exc_to_suppress: Tuple[Type[BaseException], ...] = (), +) -> None: + """ + Try to update model fields ForwardRefs based on model and localns. + """ + if model.__module__ in sys.modules: + globalns = sys.modules[model.__module__].__dict__.copy() + else: + globalns = {} + + globalns.setdefault(model.__name__, model) + + for f in fields: + try: + update_field_forward_refs(f, globalns=globalns, localns=localns) + except exc_to_suppress: + pass + + for key in set(json_encoders.keys()): + if isinstance(key, str): + fr: ForwardRef = ForwardRef(key) + elif isinstance(key, ForwardRef): + fr = key + else: + continue + + try: + new_key = evaluate_forwardref(fr, globalns, localns or None) + except exc_to_suppress: # pragma: no cover + continue + + json_encoders[new_key] = json_encoders.pop(key) + + +def get_class(type_: Type[Any]) -> Union[None, bool, Type[Any]]: + """ + Tries to get the class of a Type[T] annotation. Returns True if Type is used + without brackets. Otherwise returns None. 
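+
+    Illustrative doctests (editor's addition, sketched against recent Pythons):
+
+    >>> get_class(Type[int])
+    <class 'int'>
+    >>> get_class(Type)
+    True
+    >>> get_class(int) is None
+    True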
+ """ + if type_ is type: + return True + + if get_origin(type_) is None: + return None + + args = get_args(type_) + if not args or not isinstance(args[0], type): + return True + else: + return args[0] + + +def get_sub_types(tp: Any) -> List[Any]: + """ + Return all the types that are allowed by type `tp` + `tp` can be a `Union` of allowed types or an `Annotated` type + """ + origin = get_origin(tp) + if origin is Annotated: + return get_sub_types(get_args(tp)[0]) + elif is_union(origin): + return [x for t in get_args(tp) for x in get_sub_types(t)] + else: + return [tp] diff --git a/lib/pydantic/utils.py b/lib/pydantic/utils.py new file mode 100644 index 00000000..1d016c0e --- /dev/null +++ b/lib/pydantic/utils.py @@ -0,0 +1,841 @@ +import keyword +import warnings +import weakref +from collections import OrderedDict, defaultdict, deque +from copy import deepcopy +from itertools import islice, zip_longest +from types import BuiltinFunctionType, CodeType, FunctionType, GeneratorType, LambdaType, ModuleType +from typing import ( + TYPE_CHECKING, + AbstractSet, + Any, + Callable, + Collection, + Dict, + Generator, + Iterable, + Iterator, + List, + Mapping, + MutableMapping, + NoReturn, + Optional, + Set, + Tuple, + Type, + TypeVar, + Union, +) + +from typing_extensions import Annotated + +from .errors import ConfigError +from .typing import ( + NoneType, + WithArgsTypes, + all_literal_values, + display_as_type, + get_args, + get_origin, + is_literal_type, + is_union, +) +from .version import version_info + +if TYPE_CHECKING: + from inspect import Signature + from pathlib import Path + + from .config import BaseConfig + from .dataclasses import Dataclass + from .fields import ModelField + from .main import BaseModel + from .typing import AbstractSetIntStr, DictIntStrAny, IntStr, MappingIntStrAny, ReprArgs + + RichReprResult = Iterable[Union[Any, Tuple[Any], Tuple[str, Any], Tuple[str, Any, Any]]] + +__all__ = ( + 'import_string', + 'sequence_like', + 'validate_field_name', + 'lenient_isinstance', + 'lenient_issubclass', + 'in_ipython', + 'is_valid_identifier', + 'deep_update', + 'update_not_none', + 'almost_equal_floats', + 'get_model', + 'to_camel', + 'is_valid_field', + 'smart_deepcopy', + 'PyObjectStr', + 'Representation', + 'GetterDict', + 'ValueItems', + 'version_info', # required here to match behaviour in v1.3 + 'ClassAttribute', + 'path_type', + 'ROOT_KEY', + 'get_unique_discriminator_alias', + 'get_discriminator_alias_and_values', + 'DUNDER_ATTRIBUTES', + 'LimitedDict', +) + +ROOT_KEY = '__root__' +# these are types that are returned unchanged by deepcopy +IMMUTABLE_NON_COLLECTIONS_TYPES: Set[Type[Any]] = { + int, + float, + complex, + str, + bool, + bytes, + type, + NoneType, + FunctionType, + BuiltinFunctionType, + LambdaType, + weakref.ref, + CodeType, + # note: including ModuleType will differ from behaviour of deepcopy by not producing error. + # It might be not a good idea in general, but considering that this function used only internally + # against default values of fields, this will allow to actually have a field with module as default value + ModuleType, + NotImplemented.__class__, + Ellipsis.__class__, +} + +# these are types that if empty, might be copied with simple copy() instead of deepcopy() +BUILTIN_COLLECTIONS: Set[Type[Any]] = { + list, + set, + tuple, + frozenset, + dict, + OrderedDict, + defaultdict, + deque, +} + + +def import_string(dotted_path: str) -> Any: + """ + Stolen approximately from django. 
Import a dotted module path and return the attribute/class designated by the + last name in the path. Raise ImportError if the import fails. + """ + from importlib import import_module + + try: + module_path, class_name = dotted_path.strip(' ').rsplit('.', 1) + except ValueError as e: + raise ImportError(f'"{dotted_path}" doesn\'t look like a module path') from e + + module = import_module(module_path) + try: + return getattr(module, class_name) + except AttributeError as e: + raise ImportError(f'Module "{module_path}" does not define a "{class_name}" attribute') from e + + +def truncate(v: Union[str], *, max_len: int = 80) -> str: + """ + Truncate a value and add a unicode ellipsis (three dots) to the end if it was too long + """ + warnings.warn('`truncate` is no-longer used by pydantic and is deprecated', DeprecationWarning) + if isinstance(v, str) and len(v) > (max_len - 2): + # -3 so quote + string + … + quote has correct length + return (v[: (max_len - 3)] + '…').__repr__() + try: + v = v.__repr__() + except TypeError: + v = v.__class__.__repr__(v) # in case v is a type + if len(v) > max_len: + v = v[: max_len - 1] + '…' + return v + + +def sequence_like(v: Any) -> bool: + return isinstance(v, (list, tuple, set, frozenset, GeneratorType, deque)) + + +def validate_field_name(bases: List[Type['BaseModel']], field_name: str) -> None: + """ + Ensure that the field's name does not shadow an existing attribute of the model. + """ + for base in bases: + if getattr(base, field_name, None): + raise NameError( + f'Field name "{field_name}" shadows a BaseModel attribute; ' + f'use a different field name with "alias=\'{field_name}\'".' + ) + + +def lenient_isinstance(o: Any, class_or_tuple: Union[Type[Any], Tuple[Type[Any], ...], None]) -> bool: + try: + return isinstance(o, class_or_tuple) # type: ignore[arg-type] + except TypeError: + return False + + +def lenient_issubclass(cls: Any, class_or_tuple: Union[Type[Any], Tuple[Type[Any], ...], None]) -> bool: + try: + return isinstance(cls, type) and issubclass(cls, class_or_tuple) # type: ignore[arg-type] + except TypeError: + if isinstance(cls, WithArgsTypes): + return False + raise # pragma: no cover + + +def in_ipython() -> bool: + """ + Check whether we're in an ipython environment, including jupyter notebooks. + """ + try: + eval('__IPYTHON__') + except NameError: + return False + else: # pragma: no cover + return True + + +def is_valid_identifier(identifier: str) -> bool: + """ + Checks that a string is a valid identifier and not a Python keyword. + :param identifier: The identifier to test. + :return: True if the identifier is valid. 
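+
+    Examples (editor's addition):
+
+    >>> is_valid_identifier('valid_name')
+    True
+    >>> is_valid_identifier('class')  # a keyword, so rejected
+    False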
+ """ + return identifier.isidentifier() and not keyword.iskeyword(identifier) + + +KeyType = TypeVar('KeyType') + + +def deep_update(mapping: Dict[KeyType, Any], *updating_mappings: Dict[KeyType, Any]) -> Dict[KeyType, Any]: + updated_mapping = mapping.copy() + for updating_mapping in updating_mappings: + for k, v in updating_mapping.items(): + if k in updated_mapping and isinstance(updated_mapping[k], dict) and isinstance(v, dict): + updated_mapping[k] = deep_update(updated_mapping[k], v) + else: + updated_mapping[k] = v + return updated_mapping + + +def update_not_none(mapping: Dict[Any, Any], **update: Any) -> None: + mapping.update({k: v for k, v in update.items() if v is not None}) + + +def almost_equal_floats(value_1: float, value_2: float, *, delta: float = 1e-8) -> bool: + """ + Return True if two floats are almost equal + """ + return abs(value_1 - value_2) <= delta + + +def generate_model_signature( + init: Callable[..., None], fields: Dict[str, 'ModelField'], config: Type['BaseConfig'] +) -> 'Signature': + """ + Generate signature for model based on its fields + """ + from inspect import Parameter, Signature, signature + + from .config import Extra + + present_params = signature(init).parameters.values() + merged_params: Dict[str, Parameter] = {} + var_kw = None + use_var_kw = False + + for param in islice(present_params, 1, None): # skip self arg + if param.kind is param.VAR_KEYWORD: + var_kw = param + continue + merged_params[param.name] = param + + if var_kw: # if custom init has no var_kw, fields which are not declared in it cannot be passed through + allow_names = config.allow_population_by_field_name + for field_name, field in fields.items(): + param_name = field.alias + if field_name in merged_params or param_name in merged_params: + continue + elif not is_valid_identifier(param_name): + if allow_names and is_valid_identifier(field_name): + param_name = field_name + else: + use_var_kw = True + continue + + # TODO: replace annotation with actual expected types once #1055 solved + kwargs = {'default': field.default} if not field.required else {} + merged_params[param_name] = Parameter( + param_name, Parameter.KEYWORD_ONLY, annotation=field.annotation, **kwargs + ) + + if config.extra is Extra.allow: + use_var_kw = True + + if var_kw and use_var_kw: + # Make sure the parameter for extra kwargs + # does not have the same name as a field + default_model_signature = [ + ('__pydantic_self__', Parameter.POSITIONAL_OR_KEYWORD), + ('data', Parameter.VAR_KEYWORD), + ] + if [(p.name, p.kind) for p in present_params] == default_model_signature: + # if this is the standard model signature, use extra_data as the extra args name + var_kw_name = 'extra_data' + else: + # else start from var_kw + var_kw_name = var_kw.name + + # generate a name that's definitely unique + while var_kw_name in fields: + var_kw_name += '_' + merged_params[var_kw_name] = var_kw.replace(name=var_kw_name) + + return Signature(parameters=list(merged_params.values()), return_annotation=None) + + +def get_model(obj: Union[Type['BaseModel'], Type['Dataclass']]) -> Type['BaseModel']: + from .main import BaseModel + + try: + model_cls = obj.__pydantic_model__ # type: ignore + except AttributeError: + model_cls = obj + + if not issubclass(model_cls, BaseModel): + raise TypeError('Unsupported type, must be either BaseModel or dataclass') + return model_cls + + +def to_camel(string: str) -> str: + return ''.join(word.capitalize() for word in string.split('_')) + + +def to_lower_camel(string: str) -> str: + if len(string) 
>= 1:
+        pascal_string = to_camel(string)
+        return pascal_string[0].lower() + pascal_string[1:]
+    return string.lower()
+
+
+T = TypeVar('T')
+
+
+def unique_list(
+    input_list: Union[List[T], Tuple[T, ...]],
+    *,
+    name_factory: Callable[[T], str] = str,
+) -> List[T]:
+    """
+    Make a list unique while maintaining order.
+    We update the list if another one with the same name is set
+    (e.g. a root validator overridden in a subclass).
+    """
+    result: List[T] = []
+    result_names: List[str] = []
+    for v in input_list:
+        v_name = name_factory(v)
+        if v_name not in result_names:
+            result_names.append(v_name)
+            result.append(v)
+        else:
+            result[result_names.index(v_name)] = v
+
+    return result
+
+
+class PyObjectStr(str):
+    """
+    String class where repr doesn't include quotes. Useful with Representation when you want to return a string
+    representation of something that is valid (or pseudo-valid) Python.
+    """
+
+    def __repr__(self) -> str:
+        return str(self)
+
+
+class Representation:
+    """
+    Mixin to provide __str__, __repr__, and __pretty__ methods. See #884 for more details.
+
+    __pretty__ is used by [devtools](https://python-devtools.helpmanual.io/) to provide human-readable representations
+    of objects.
+    """
+
+    __slots__: Tuple[str, ...] = tuple()
+
+    def __repr_args__(self) -> 'ReprArgs':
+        """
+        Returns the attributes to show in __str__, __repr__, and __pretty__; this is generally overridden.
+
+        Can either return:
+        * name - value pairs, e.g.: `[('foo_name', 'foo'), ('bar_name', ['b', 'a', 'r'])]`
+        * or, just values, e.g.: `[(None, 'foo'), (None, ['b', 'a', 'r'])]`
+        """
+        attrs = ((s, getattr(self, s)) for s in self.__slots__)
+        return [(a, v) for a, v in attrs if v is not None]
+
+    def __repr_name__(self) -> str:
+        """
+        Name of the instance's class, used in __repr__.
+        """
+        return self.__class__.__name__
+
+    def __repr_str__(self, join_str: str) -> str:
+        return join_str.join(repr(v) if a is None else f'{a}={v!r}' for a, v in self.__repr_args__())
+
+    def __pretty__(self, fmt: Callable[[Any], Any], **kwargs: Any) -> Generator[Any, None, None]:
+        """
+        Used by devtools (https://python-devtools.helpmanual.io/) to provide human-readable representations of objects.
+        """
+        yield self.__repr_name__() + '('
+        yield 1
+        for name, value in self.__repr_args__():
+            if name is not None:
+                yield name + '='
+            yield fmt(value)
+            yield ','
+        yield 0
+        yield -1
+        yield ')'
+
+    def __str__(self) -> str:
+        return self.__repr_str__(' ')
+
+    def __repr__(self) -> str:
+        return f'{self.__repr_name__()}({self.__repr_str__(", ")})'
+
+    def __rich_repr__(self) -> 'RichReprResult':
+        """Get fields for the Rich library."""
+        for name, field_repr in self.__repr_args__():
+            if name is None:
+                yield field_repr
+            else:
+                yield name, field_repr
+
+
+class GetterDict(Representation):
+    """
+    Hack to make objects smell just enough like dicts for validate_model.
+
+    We can't inherit from Mapping[str, Any] because it upsets cython so we have to implement all methods ourselves.
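+
+    A hedged usage sketch (``Point`` is a stand-in class, not part of this module):
+
+    >>> class Point:
+    ...     x = 1
+    ...     y = 2
+    >>> gd = GetterDict(Point())
+    >>> gd['x'], gd.get('y'), gd.get('z', 0)
+    (1, 2, 0)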
+ """ + + __slots__ = ('_obj',) + + def __init__(self, obj: Any): + self._obj = obj + + def __getitem__(self, key: str) -> Any: + try: + return getattr(self._obj, key) + except AttributeError as e: + raise KeyError(key) from e + + def get(self, key: Any, default: Any = None) -> Any: + return getattr(self._obj, key, default) + + def extra_keys(self) -> Set[Any]: + """ + We don't want to get any other attributes of obj if the model didn't explicitly ask for them + """ + return set() + + def keys(self) -> List[Any]: + """ + Keys of the pseudo dictionary, uses a list not set so order information can be maintained like python + dictionaries. + """ + return list(self) + + def values(self) -> List[Any]: + return [self[k] for k in self] + + def items(self) -> Iterator[Tuple[str, Any]]: + for k in self: + yield k, self.get(k) + + def __iter__(self) -> Iterator[str]: + for name in dir(self._obj): + if not name.startswith('_'): + yield name + + def __len__(self) -> int: + return sum(1 for _ in self) + + def __contains__(self, item: Any) -> bool: + return item in self.keys() + + def __eq__(self, other: Any) -> bool: + return dict(self) == dict(other.items()) + + def __repr_args__(self) -> 'ReprArgs': + return [(None, dict(self))] + + def __repr_name__(self) -> str: + return f'GetterDict[{display_as_type(self._obj)}]' + + +class ValueItems(Representation): + """ + Class for more convenient calculation of excluded or included fields on values. + """ + + __slots__ = ('_items', '_type') + + def __init__(self, value: Any, items: Union['AbstractSetIntStr', 'MappingIntStrAny']) -> None: + items = self._coerce_items(items) + + if isinstance(value, (list, tuple)): + items = self._normalize_indexes(items, len(value)) + + self._items: 'MappingIntStrAny' = items + + def is_excluded(self, item: Any) -> bool: + """ + Check if item is fully excluded. 
+
+        :param item: key or index of a value
+        """
+        return self.is_true(self._items.get(item))
+
+    def is_included(self, item: Any) -> bool:
+        """
+        Check if the value is contained in ``self._items``.
+
+        :param item: key or index of value
+        """
+        return item in self._items
+
+    def for_element(self, e: 'IntStr') -> Optional[Union['AbstractSetIntStr', 'MappingIntStrAny']]:
+        """
+        :param e: key or index of element on value
+        :return: raw values for the element if ``self._items`` is a dict and contains the needed element
+        """
+
+        item = self._items.get(e)
+        return item if not self.is_true(item) else None
+
+    def _normalize_indexes(self, items: 'MappingIntStrAny', v_length: int) -> 'DictIntStrAny':
+        """
+        :param items: dict or set of indexes which will be normalized
+        :param v_length: length of the sequence whose indexes are being normalized
+
+        >>> self._normalize_indexes({0: True, -2: True, -1: True}, 4)
+        {0: True, 2: True, 3: True}
+        >>> self._normalize_indexes({'__all__': True}, 4)
+        {0: True, 1: True, 2: True, 3: True}
+        """
+
+        normalized_items: 'DictIntStrAny' = {}
+        all_items = None
+        for i, v in items.items():
+            if not (isinstance(v, Mapping) or isinstance(v, AbstractSet) or self.is_true(v)):
+                raise TypeError(f'Unexpected type of exclude value for index "{i}" {v.__class__}')
+            if i == '__all__':
+                all_items = self._coerce_value(v)
+                continue
+            if not isinstance(i, int):
+                raise TypeError(
+                    'Excluding fields from a sequence of sub-models or dicts must be performed index-wise: '
+                    'expected integer keys or keyword "__all__"'
+                )
+            normalized_i = v_length + i if i < 0 else i
+            normalized_items[normalized_i] = self.merge(v, normalized_items.get(normalized_i))
+
+        if not all_items:
+            return normalized_items
+        if self.is_true(all_items):
+            for i in range(v_length):
+                normalized_items.setdefault(i, ...)
+            return normalized_items
+        for i in range(v_length):
+            normalized_item = normalized_items.setdefault(i, {})
+            if not self.is_true(normalized_item):
+                normalized_items[i] = self.merge(all_items, normalized_item)
+        return normalized_items
+
+    @classmethod
+    def merge(cls, base: Any, override: Any, intersect: bool = False) -> Any:
+        """
+        Merge a ``base`` item with an ``override`` item.
+
+        Both ``base`` and ``override`` are converted to dictionaries if possible.
+        Sets are converted to dictionaries with the set's entries as keys and
+        Ellipsis as values.
+
+        Each key-value pair existing in ``base`` is merged with ``override``,
+        while the rest of the key-value pairs are updated recursively with this function.
+
+        Merging takes place based on the "union" of keys if ``intersect`` is
+        set to ``False`` (default) and on the intersection of keys if
+        ``intersect`` is set to ``True``.
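+
+        Editor's illustration of union vs. intersection merging:
+
+        >>> ValueItems.merge({'a': ...}, {'b': ...})
+        {'a': Ellipsis, 'b': Ellipsis}
+        >>> ValueItems.merge({'a': ...}, {'b': ...}, intersect=True)
+        {}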
+ """ + override = cls._coerce_value(override) + base = cls._coerce_value(base) + if override is None: + return base + if cls.is_true(base) or base is None: + return override + if cls.is_true(override): + return base if intersect else override + + # intersection or union of keys while preserving ordering: + if intersect: + merge_keys = [k for k in base if k in override] + [k for k in override if k in base] + else: + merge_keys = list(base) + [k for k in override if k not in base] + + merged: 'DictIntStrAny' = {} + for k in merge_keys: + merged_item = cls.merge(base.get(k), override.get(k), intersect=intersect) + if merged_item is not None: + merged[k] = merged_item + + return merged + + @staticmethod + def _coerce_items(items: Union['AbstractSetIntStr', 'MappingIntStrAny']) -> 'MappingIntStrAny': + if isinstance(items, Mapping): + pass + elif isinstance(items, AbstractSet): + items = dict.fromkeys(items, ...) + else: + class_name = getattr(items, '__class__', '???') + assert_never( + items, + f'Unexpected type of exclude value {class_name}', + ) + return items + + @classmethod + def _coerce_value(cls, value: Any) -> Any: + if value is None or cls.is_true(value): + return value + return cls._coerce_items(value) + + @staticmethod + def is_true(v: Any) -> bool: + return v is True or v is ... + + def __repr_args__(self) -> 'ReprArgs': + return [(None, self._items)] + + +class ClassAttribute: + """ + Hide class attribute from its instances + """ + + __slots__ = ( + 'name', + 'value', + ) + + def __init__(self, name: str, value: Any) -> None: + self.name = name + self.value = value + + def __get__(self, instance: Any, owner: Type[Any]) -> None: + if instance is None: + return self.value + raise AttributeError(f'{self.name!r} attribute of {owner.__name__!r} is class-only') + + +path_types = { + 'is_dir': 'directory', + 'is_file': 'file', + 'is_mount': 'mount point', + 'is_symlink': 'symlink', + 'is_block_device': 'block device', + 'is_char_device': 'char device', + 'is_fifo': 'FIFO', + 'is_socket': 'socket', +} + + +def path_type(p: 'Path') -> str: + """ + Find out what sort of thing a path is. + """ + assert p.exists(), 'path does not exist' + for method, name in path_types.items(): + if getattr(p, method)(): + return name + + return 'unknown' + + +Obj = TypeVar('Obj') + + +def smart_deepcopy(obj: Obj) -> Obj: + """ + Return type as is for immutable built-in types + Use obj.copy() for built-in empty collections + Use copy.deepcopy() for non-empty collections and unknown objects + """ + + obj_type = obj.__class__ + if obj_type in IMMUTABLE_NON_COLLECTIONS_TYPES: + return obj # fastest case: obj is immutable and not collection therefore will not be copied anyway + try: + if not obj and obj_type in BUILTIN_COLLECTIONS: + # faster way for empty collections, no need to copy its members + return obj if obj_type is tuple else obj.copy() # type: ignore # tuple doesn't have copy method + except (TypeError, ValueError, RuntimeError): + # do we really dare to catch ALL errors? 
Seems a bit risky + pass + + return deepcopy(obj) # slowest way when we actually might need a deepcopy + + +def is_valid_field(name: str) -> bool: + if not name.startswith('_'): + return True + return ROOT_KEY == name + + +DUNDER_ATTRIBUTES = { + '__annotations__', + '__classcell__', + '__doc__', + '__module__', + '__orig_bases__', + '__orig_class__', + '__qualname__', +} + + +def is_valid_private_name(name: str) -> bool: + return not is_valid_field(name) and name not in DUNDER_ATTRIBUTES + + +_EMPTY = object() + + +def all_identical(left: Iterable[Any], right: Iterable[Any]) -> bool: + """ + Check that the items of `left` are the same objects as those in `right`. + + >>> a, b = object(), object() + >>> all_identical([a, b, a], [a, b, a]) + True + >>> all_identical([a, b, [a]], [a, b, [a]]) # new list object, while "equal" is not "identical" + False + """ + for left_item, right_item in zip_longest(left, right, fillvalue=_EMPTY): + if left_item is not right_item: + return False + return True + + +def assert_never(obj: NoReturn, msg: str) -> NoReturn: + """ + Helper to make sure that we have covered all possible types. + + This is mostly useful for ``mypy``, docs: + https://mypy.readthedocs.io/en/latest/literal_types.html#exhaustive-checks + """ + raise TypeError(msg) + + +def get_unique_discriminator_alias(all_aliases: Collection[str], discriminator_key: str) -> str: + """Validate that all aliases are the same and if that's the case return the alias""" + unique_aliases = set(all_aliases) + if len(unique_aliases) > 1: + raise ConfigError( + f'Aliases for discriminator {discriminator_key!r} must be the same (got {", ".join(sorted(all_aliases))})' + ) + return unique_aliases.pop() + + +def get_discriminator_alias_and_values(tp: Any, discriminator_key: str) -> Tuple[str, Tuple[str, ...]]: + """ + Get alias and all valid values in the `Literal` type of the discriminator field + `tp` can be a `BaseModel` class or directly an `Annotated` `Union` of many. 
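+
+    A hedged sketch (``Cat`` is a hypothetical model whose ``pet_type`` field is
+    typed ``Literal['cat']``):
+
+    >>> get_discriminator_alias_and_values(Cat, 'pet_type')  # doctest: +SKIP
+    ('pet_type', ('cat',))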
+    """
+    is_root_model = getattr(tp, '__custom_root_type__', False)
+
+    if get_origin(tp) is Annotated:
+        tp = get_args(tp)[0]
+
+    if hasattr(tp, '__pydantic_model__'):
+        tp = tp.__pydantic_model__
+
+    if is_union(get_origin(tp)):
+        alias, all_values = _get_union_alias_and_all_values(tp, discriminator_key)
+        return alias, tuple(v for values in all_values for v in values)
+    elif is_root_model:
+        union_type = tp.__fields__[ROOT_KEY].type_
+        alias, all_values = _get_union_alias_and_all_values(union_type, discriminator_key)
+
+        if len(set(all_values)) > 1:
+            raise ConfigError(
+                f'Field {discriminator_key!r} is not the same for all submodels of {display_as_type(tp)!r}'
+            )
+
+        return alias, all_values[0]
+
+    else:
+        try:
+            t_discriminator_type = tp.__fields__[discriminator_key].type_
+        except AttributeError as e:
+            raise TypeError(f'Type {tp.__name__!r} is not a valid `BaseModel` or `dataclass`') from e
+        except KeyError as e:
+            raise ConfigError(f'Model {tp.__name__!r} needs a discriminator field for key {discriminator_key!r}') from e
+
+        if not is_literal_type(t_discriminator_type):
+            raise ConfigError(f'Field {discriminator_key!r} of model {tp.__name__!r} needs to be a `Literal`')
+
+        return tp.__fields__[discriminator_key].alias, all_literal_values(t_discriminator_type)
+
+
+def _get_union_alias_and_all_values(
+    union_type: Type[Any], discriminator_key: str
+) -> Tuple[str, Tuple[Tuple[str, ...], ...]]:
+    zipped_aliases_values = [get_discriminator_alias_and_values(t, discriminator_key) for t in get_args(union_type)]
+    # unzip: [('alias_a', ('v1', 'v2')), ('alias_b', ('v3',))] => [('alias_a', 'alias_b'), (('v1', 'v2'), ('v3',))]
+    all_aliases, all_values = zip(*zipped_aliases_values)
+    return get_unique_discriminator_alias(all_aliases, discriminator_key), all_values
+
+
+KT = TypeVar('KT')
+VT = TypeVar('VT')
+if TYPE_CHECKING:
+    # Annoyingly, inheriting from both `MutableMapping` and `dict` breaks cython, hence this workaround
+    class LimitedDict(dict, MutableMapping[KT, VT]):  # type: ignore[type-arg]
+        def __init__(self, size_limit: int = 1000):
+            ...
+
+else:
+
+    class LimitedDict(dict):
+        """
+        Limit the size/length of a dict used for caching to avoid unlimited increase in memory usage.
+
+        Since the dict is ordered, and we always remove elements from the beginning, this is effectively a FIFO cache.
+
+        Annoyingly, inheriting from `MutableMapping` breaks cython.
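+
+        Editor's illustration of the eviction behaviour (on overflow, the excess
+        plus a 10% margin of the oldest keys is dropped):
+
+        >>> d = LimitedDict(size_limit=10)
+        >>> for i in range(11):
+        ...     d[i] = i
+        >>> len(d), 0 in d
+        (9, False)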
+ """ + + def __init__(self, size_limit: int = 1000): + self.size_limit = size_limit + super().__init__() + + def __setitem__(self, __key: Any, __value: Any) -> None: + super().__setitem__(__key, __value) + if len(self) > self.size_limit: + excess = len(self) - self.size_limit + self.size_limit // 10 + to_remove = list(self.keys())[:excess] + for key in to_remove: + del self[key] + + def __class_getitem__(cls, *args: Any) -> Any: + # to avoid errors with 3.7 + pass diff --git a/lib/pydantic/validators.py b/lib/pydantic/validators.py new file mode 100644 index 00000000..fb6d0418 --- /dev/null +++ b/lib/pydantic/validators.py @@ -0,0 +1,765 @@ +import math +import re +from collections import OrderedDict, deque +from collections.abc import Hashable as CollectionsHashable +from datetime import date, datetime, time, timedelta +from decimal import Decimal, DecimalException +from enum import Enum, IntEnum +from ipaddress import IPv4Address, IPv4Interface, IPv4Network, IPv6Address, IPv6Interface, IPv6Network +from pathlib import Path +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Deque, + Dict, + ForwardRef, + FrozenSet, + Generator, + Hashable, + List, + NamedTuple, + Pattern, + Set, + Tuple, + Type, + TypeVar, + Union, +) +from uuid import UUID + +from . import errors +from .datetime_parse import parse_date, parse_datetime, parse_duration, parse_time +from .typing import ( + AnyCallable, + all_literal_values, + display_as_type, + get_class, + is_callable_type, + is_literal_type, + is_namedtuple, + is_none_type, + is_typeddict, +) +from .utils import almost_equal_floats, lenient_issubclass, sequence_like + +if TYPE_CHECKING: + from typing_extensions import Literal, TypedDict + + from .config import BaseConfig + from .fields import ModelField + from .types import ConstrainedDecimal, ConstrainedFloat, ConstrainedInt + + ConstrainedNumber = Union[ConstrainedDecimal, ConstrainedFloat, ConstrainedInt] + AnyOrderedDict = OrderedDict[Any, Any] + Number = Union[int, float, Decimal] + StrBytes = Union[str, bytes] + + +def str_validator(v: Any) -> Union[str]: + if isinstance(v, str): + if isinstance(v, Enum): + return v.value + else: + return v + elif isinstance(v, (float, int, Decimal)): + # is there anything else we want to add here? If you think so, create an issue. 
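+        # editor's note: e.g. 3.5 -> '3.5' and Decimal('1.0') -> '1.0'; each of these
+        # types has an unambiguous string form, so plain str() is safe here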
+ return str(v) + elif isinstance(v, (bytes, bytearray)): + return v.decode() + else: + raise errors.StrError() + + +def strict_str_validator(v: Any) -> Union[str]: + if isinstance(v, str) and not isinstance(v, Enum): + return v + raise errors.StrError() + + +def bytes_validator(v: Any) -> Union[bytes]: + if isinstance(v, bytes): + return v + elif isinstance(v, bytearray): + return bytes(v) + elif isinstance(v, str): + return v.encode() + elif isinstance(v, (float, int, Decimal)): + return str(v).encode() + else: + raise errors.BytesError() + + +def strict_bytes_validator(v: Any) -> Union[bytes]: + if isinstance(v, bytes): + return v + elif isinstance(v, bytearray): + return bytes(v) + else: + raise errors.BytesError() + + +BOOL_FALSE = {0, '0', 'off', 'f', 'false', 'n', 'no'} +BOOL_TRUE = {1, '1', 'on', 't', 'true', 'y', 'yes'} + + +def bool_validator(v: Any) -> bool: + if v is True or v is False: + return v + if isinstance(v, bytes): + v = v.decode() + if isinstance(v, str): + v = v.lower() + try: + if v in BOOL_TRUE: + return True + if v in BOOL_FALSE: + return False + except TypeError: + raise errors.BoolError() + raise errors.BoolError() + + +# matches the default limit cpython, see https://github.com/python/cpython/pull/96500 +max_str_int = 4_300 + + +def int_validator(v: Any) -> int: + if isinstance(v, int) and not (v is True or v is False): + return v + + # see https://github.com/pydantic/pydantic/issues/1477 and in turn, https://github.com/python/cpython/issues/95778 + # this check should be unnecessary once patch releases are out for 3.7, 3.8, 3.9 and 3.10 + # but better to check here until then. + # NOTICE: this does not fully protect user from the DOS risk since the standard library JSON implementation + # (and other std lib modules like xml) use `int()` and are likely called before this, the best workaround is to + # 1. update to the latest patch release of python once released, 2. 
use a different JSON library like ujson + if isinstance(v, (str, bytes, bytearray)) and len(v) > max_str_int: + raise errors.IntegerError() + + try: + return int(v) + except (TypeError, ValueError, OverflowError): + raise errors.IntegerError() + + +def strict_int_validator(v: Any) -> int: + if isinstance(v, int) and not (v is True or v is False): + return v + raise errors.IntegerError() + + +def float_validator(v: Any) -> float: + if isinstance(v, float): + return v + + try: + return float(v) + except (TypeError, ValueError): + raise errors.FloatError() + + +def strict_float_validator(v: Any) -> float: + if isinstance(v, float): + return v + raise errors.FloatError() + + +def float_finite_validator(v: 'Number', field: 'ModelField', config: 'BaseConfig') -> 'Number': + allow_inf_nan = getattr(field.type_, 'allow_inf_nan', None) + if allow_inf_nan is None: + allow_inf_nan = config.allow_inf_nan + + if allow_inf_nan is False and (math.isnan(v) or math.isinf(v)): + raise errors.NumberNotFiniteError() + return v + + +def number_multiple_validator(v: 'Number', field: 'ModelField') -> 'Number': + field_type: ConstrainedNumber = field.type_ + if field_type.multiple_of is not None: + mod = float(v) / float(field_type.multiple_of) % 1 + if not almost_equal_floats(mod, 0.0) and not almost_equal_floats(mod, 1.0): + raise errors.NumberNotMultipleError(multiple_of=field_type.multiple_of) + return v + + +def number_size_validator(v: 'Number', field: 'ModelField') -> 'Number': + field_type: ConstrainedNumber = field.type_ + if field_type.gt is not None and not v > field_type.gt: + raise errors.NumberNotGtError(limit_value=field_type.gt) + elif field_type.ge is not None and not v >= field_type.ge: + raise errors.NumberNotGeError(limit_value=field_type.ge) + + if field_type.lt is not None and not v < field_type.lt: + raise errors.NumberNotLtError(limit_value=field_type.lt) + if field_type.le is not None and not v <= field_type.le: + raise errors.NumberNotLeError(limit_value=field_type.le) + + return v + + +def constant_validator(v: 'Any', field: 'ModelField') -> 'Any': + """Validate ``const`` fields. + + The value provided for a ``const`` field must be equal to the default value + of the field. This is to support the keyword of the same name in JSON + Schema. 
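+
+    A hedged sketch (``Model`` is hypothetical, declaring ``x: int = Field(42, const=True)``):
+
+    >>> Model(x=42).x  # doctest: +SKIP
+    42
+    >>> Model(x=7)  # raises a ValidationError  # doctest: +SKIP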
+ """ + if v != field.default: + raise errors.WrongConstantError(given=v, permitted=[field.default]) + + return v + + +def anystr_length_validator(v: 'StrBytes', config: 'BaseConfig') -> 'StrBytes': + v_len = len(v) + + min_length = config.min_anystr_length + if v_len < min_length: + raise errors.AnyStrMinLengthError(limit_value=min_length) + + max_length = config.max_anystr_length + if max_length is not None and v_len > max_length: + raise errors.AnyStrMaxLengthError(limit_value=max_length) + + return v + + +def anystr_strip_whitespace(v: 'StrBytes') -> 'StrBytes': + return v.strip() + + +def anystr_upper(v: 'StrBytes') -> 'StrBytes': + return v.upper() + + +def anystr_lower(v: 'StrBytes') -> 'StrBytes': + return v.lower() + + +def ordered_dict_validator(v: Any) -> 'AnyOrderedDict': + if isinstance(v, OrderedDict): + return v + + try: + return OrderedDict(v) + except (TypeError, ValueError): + raise errors.DictError() + + +def dict_validator(v: Any) -> Dict[Any, Any]: + if isinstance(v, dict): + return v + + try: + return dict(v) + except (TypeError, ValueError): + raise errors.DictError() + + +def list_validator(v: Any) -> List[Any]: + if isinstance(v, list): + return v + elif sequence_like(v): + return list(v) + else: + raise errors.ListError() + + +def tuple_validator(v: Any) -> Tuple[Any, ...]: + if isinstance(v, tuple): + return v + elif sequence_like(v): + return tuple(v) + else: + raise errors.TupleError() + + +def set_validator(v: Any) -> Set[Any]: + if isinstance(v, set): + return v + elif sequence_like(v): + return set(v) + else: + raise errors.SetError() + + +def frozenset_validator(v: Any) -> FrozenSet[Any]: + if isinstance(v, frozenset): + return v + elif sequence_like(v): + return frozenset(v) + else: + raise errors.FrozenSetError() + + +def deque_validator(v: Any) -> Deque[Any]: + if isinstance(v, deque): + return v + elif sequence_like(v): + return deque(v) + else: + raise errors.DequeError() + + +def enum_member_validator(v: Any, field: 'ModelField', config: 'BaseConfig') -> Enum: + try: + enum_v = field.type_(v) + except ValueError: + # field.type_ should be an enum, so will be iterable + raise errors.EnumMemberError(enum_values=list(field.type_)) + return enum_v.value if config.use_enum_values else enum_v + + +def uuid_validator(v: Any, field: 'ModelField') -> UUID: + try: + if isinstance(v, str): + v = UUID(v) + elif isinstance(v, (bytes, bytearray)): + try: + v = UUID(v.decode()) + except ValueError: + # 16 bytes in big-endian order as the bytes argument fail + # the above check + v = UUID(bytes=v) + except ValueError: + raise errors.UUIDError() + + if not isinstance(v, UUID): + raise errors.UUIDError() + + required_version = getattr(field.type_, '_required_version', None) + if required_version and v.version != required_version: + raise errors.UUIDVersionError(required_version=required_version) + + return v + + +def decimal_validator(v: Any) -> Decimal: + if isinstance(v, Decimal): + return v + elif isinstance(v, (bytes, bytearray)): + v = v.decode() + + v = str(v).strip() + + try: + v = Decimal(v) + except DecimalException: + raise errors.DecimalError() + + if not v.is_finite(): + raise errors.DecimalIsNotFiniteError() + + return v + + +def hashable_validator(v: Any) -> Hashable: + if isinstance(v, Hashable): + return v + + raise errors.HashableError() + + +def ip_v4_address_validator(v: Any) -> IPv4Address: + if isinstance(v, IPv4Address): + return v + + try: + return IPv4Address(v) + except ValueError: + raise errors.IPv4AddressError() + + +def 
ip_v6_address_validator(v: Any) -> IPv6Address: + if isinstance(v, IPv6Address): + return v + + try: + return IPv6Address(v) + except ValueError: + raise errors.IPv6AddressError() + + +def ip_v4_network_validator(v: Any) -> IPv4Network: + """ + Assume IPv4Network initialised with a default ``strict`` argument + + See more: + https://docs.python.org/library/ipaddress.html#ipaddress.IPv4Network + """ + if isinstance(v, IPv4Network): + return v + + try: + return IPv4Network(v) + except ValueError: + raise errors.IPv4NetworkError() + + +def ip_v6_network_validator(v: Any) -> IPv6Network: + """ + Assume IPv6Network initialised with a default ``strict`` argument + + See more: + https://docs.python.org/library/ipaddress.html#ipaddress.IPv6Network + """ + if isinstance(v, IPv6Network): + return v + + try: + return IPv6Network(v) + except ValueError: + raise errors.IPv6NetworkError() + + +def ip_v4_interface_validator(v: Any) -> IPv4Interface: + if isinstance(v, IPv4Interface): + return v + + try: + return IPv4Interface(v) + except ValueError: + raise errors.IPv4InterfaceError() + + +def ip_v6_interface_validator(v: Any) -> IPv6Interface: + if isinstance(v, IPv6Interface): + return v + + try: + return IPv6Interface(v) + except ValueError: + raise errors.IPv6InterfaceError() + + +def path_validator(v: Any) -> Path: + if isinstance(v, Path): + return v + + try: + return Path(v) + except TypeError: + raise errors.PathError() + + +def path_exists_validator(v: Any) -> Path: + if not v.exists(): + raise errors.PathNotExistsError(path=v) + + return v + + +def callable_validator(v: Any) -> AnyCallable: + """ + Perform a simple check if the value is callable. + + Note: complete matching of argument type hints and return types is not performed + """ + if callable(v): + return v + + raise errors.CallableError(value=v) + + +def enum_validator(v: Any) -> Enum: + if isinstance(v, Enum): + return v + + raise errors.EnumError(value=v) + + +def int_enum_validator(v: Any) -> IntEnum: + if isinstance(v, IntEnum): + return v + + raise errors.IntEnumError(value=v) + + +def make_literal_validator(type_: Any) -> Callable[[Any], Any]: + permitted_choices = all_literal_values(type_) + + # To have a O(1) complexity and still return one of the values set inside the `Literal`, + # we create a dict with the set values (a set causes some problems with the way intersection works). 
+ # In some cases the set value and checked value can indeed be different (see `test_literal_validator_str_enum`) + allowed_choices = {v: v for v in permitted_choices} + + def literal_validator(v: Any) -> Any: + try: + return allowed_choices[v] + except KeyError: + raise errors.WrongConstantError(given=v, permitted=permitted_choices) + + return literal_validator + + +def constr_length_validator(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes': + v_len = len(v) + + min_length = field.type_.min_length if field.type_.min_length is not None else config.min_anystr_length + if v_len < min_length: + raise errors.AnyStrMinLengthError(limit_value=min_length) + + max_length = field.type_.max_length if field.type_.max_length is not None else config.max_anystr_length + if max_length is not None and v_len > max_length: + raise errors.AnyStrMaxLengthError(limit_value=max_length) + + return v + + +def constr_strip_whitespace(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes': + strip_whitespace = field.type_.strip_whitespace or config.anystr_strip_whitespace + if strip_whitespace: + v = v.strip() + + return v + + +def constr_upper(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes': + upper = field.type_.to_upper or config.anystr_upper + if upper: + v = v.upper() + + return v + + +def constr_lower(v: 'StrBytes', field: 'ModelField', config: 'BaseConfig') -> 'StrBytes': + lower = field.type_.to_lower or config.anystr_lower + if lower: + v = v.lower() + return v + + +def validate_json(v: Any, config: 'BaseConfig') -> Any: + if v is None: + # pass None through to other validators + return v + try: + return config.json_loads(v) # type: ignore + except ValueError: + raise errors.JsonError() + except TypeError: + raise errors.JsonTypeError() + + +T = TypeVar('T') + + +def make_arbitrary_type_validator(type_: Type[T]) -> Callable[[T], T]: + def arbitrary_type_validator(v: Any) -> T: + if isinstance(v, type_): + return v + raise errors.ArbitraryTypeError(expected_arbitrary_type=type_) + + return arbitrary_type_validator + + +def make_class_validator(type_: Type[T]) -> Callable[[Any], Type[T]]: + def class_validator(v: Any) -> Type[T]: + if lenient_issubclass(v, type_): + return v + raise errors.SubclassError(expected_class=type_) + + return class_validator + + +def any_class_validator(v: Any) -> Type[T]: + if isinstance(v, type): + return v + raise errors.ClassError() + + +def none_validator(v: Any) -> 'Literal[None]': + if v is None: + return v + raise errors.NotNoneError() + + +def pattern_validator(v: Any) -> Pattern[str]: + if isinstance(v, Pattern): + return v + + str_value = str_validator(v) + + try: + return re.compile(str_value) + except re.error: + raise errors.PatternError() + + +NamedTupleT = TypeVar('NamedTupleT', bound=NamedTuple) + + +def make_namedtuple_validator( + namedtuple_cls: Type[NamedTupleT], config: Type['BaseConfig'] +) -> Callable[[Tuple[Any, ...]], NamedTupleT]: + from .annotated_types import create_model_from_namedtuple + + NamedTupleModel = create_model_from_namedtuple( + namedtuple_cls, + __config__=config, + __module__=namedtuple_cls.__module__, + ) + namedtuple_cls.__pydantic_model__ = NamedTupleModel # type: ignore[attr-defined] + + def namedtuple_validator(values: Tuple[Any, ...]) -> NamedTupleT: + annotations = NamedTupleModel.__annotations__ + + if len(values) > len(annotations): + raise errors.ListMaxLengthError(limit_value=len(annotations)) + + dict_values: Dict[str, Any] = dict(zip(annotations, values)) + 
validated_dict_values: Dict[str, Any] = dict(NamedTupleModel(**dict_values))
+        return namedtuple_cls(**validated_dict_values)
+
+    return namedtuple_validator
+
+
+def make_typeddict_validator(
+    typeddict_cls: Type['TypedDict'], config: Type['BaseConfig']  # type: ignore[valid-type]
+) -> Callable[[Any], Dict[str, Any]]:
+    from .annotated_types import create_model_from_typeddict
+
+    TypedDictModel = create_model_from_typeddict(
+        typeddict_cls,
+        __config__=config,
+        __module__=typeddict_cls.__module__,
+    )
+    typeddict_cls.__pydantic_model__ = TypedDictModel  # type: ignore[attr-defined]
+
+    def typeddict_validator(values: 'TypedDict') -> Dict[str, Any]:  # type: ignore[valid-type]
+        return TypedDictModel.parse_obj(values).dict(exclude_unset=True)
+
+    return typeddict_validator
+
+
+class IfConfig:
+    def __init__(self, validator: AnyCallable, *config_attr_names: str, ignored_value: Any = False) -> None:
+        self.validator = validator
+        self.config_attr_names = config_attr_names
+        self.ignored_value = ignored_value
+
+    def check(self, config: Type['BaseConfig']) -> bool:
+        return any(getattr(config, name) not in {None, self.ignored_value} for name in self.config_attr_names)
+
+
+# order is important here, for example: bool is a subclass of int so it has to come first; likewise
+# datetime before date, IPv4Interface before IPv4Address, etc.
+_VALIDATORS: List[Tuple[Type[Any], List[Any]]] = [
+    (IntEnum, [int_validator, enum_member_validator]),
+    (Enum, [enum_member_validator]),
+    (
+        str,
+        [
+            str_validator,
+            IfConfig(anystr_strip_whitespace, 'anystr_strip_whitespace'),
+            IfConfig(anystr_upper, 'anystr_upper'),
+            IfConfig(anystr_lower, 'anystr_lower'),
+            IfConfig(anystr_length_validator, 'min_anystr_length', 'max_anystr_length'),
+        ],
+    ),
+    (
+        bytes,
+        [
+            bytes_validator,
+            IfConfig(anystr_strip_whitespace, 'anystr_strip_whitespace'),
+            IfConfig(anystr_upper, 'anystr_upper'),
+            IfConfig(anystr_lower, 'anystr_lower'),
+            IfConfig(anystr_length_validator, 'min_anystr_length', 'max_anystr_length'),
+        ],
+    ),
+    (bool, [bool_validator]),
+    (int, [int_validator]),
+    (float, [float_validator, IfConfig(float_finite_validator, 'allow_inf_nan', ignored_value=True)]),
+    (Path, [path_validator]),
+    (datetime, [parse_datetime]),
+    (date, [parse_date]),
+    (time, [parse_time]),
+    (timedelta, [parse_duration]),
+    (OrderedDict, [ordered_dict_validator]),
+    (dict, [dict_validator]),
+    (list, [list_validator]),
+    (tuple, [tuple_validator]),
+    (set, [set_validator]),
+    (frozenset, [frozenset_validator]),
+    (deque, [deque_validator]),
+    (UUID, [uuid_validator]),
+    (Decimal, [decimal_validator]),
+    (IPv4Interface, [ip_v4_interface_validator]),
+    (IPv6Interface, [ip_v6_interface_validator]),
+    (IPv4Address, [ip_v4_address_validator]),
+    (IPv6Address, [ip_v6_address_validator]),
+    (IPv4Network, [ip_v4_network_validator]),
+    (IPv6Network, [ip_v6_network_validator]),
+]
+
+
+def find_validators(  # noqa: C901 (ignore complexity)
+    type_: Type[Any], config: Type['BaseConfig']
+) -> Generator[AnyCallable, None, None]:
+    from .dataclasses import is_builtin_dataclass, make_dataclass_validator
+
+    if type_ is Any or type_ is object:
+        return
+    type_type = type_.__class__
+    if type_type == ForwardRef or type_type == TypeVar:
+        return
+
+    if is_none_type(type_):
+        yield none_validator
+        return
+    if type_ is Pattern or type_ is re.Pattern:
+        yield pattern_validator
+        return
+    if type_ is Hashable or type_ is CollectionsHashable:
+        yield hashable_validator
+        return
+    if is_callable_type(type_):
+        yield callable_validator
+        return
+    if 
is_literal_type(type_): + yield make_literal_validator(type_) + return + if is_builtin_dataclass(type_): + yield from make_dataclass_validator(type_, config) + return + if type_ is Enum: + yield enum_validator + return + if type_ is IntEnum: + yield int_enum_validator + return + if is_namedtuple(type_): + yield tuple_validator + yield make_namedtuple_validator(type_, config) + return + if is_typeddict(type_): + yield make_typeddict_validator(type_, config) + return + + class_ = get_class(type_) + if class_ is not None: + if class_ is not Any and isinstance(class_, type): + yield make_class_validator(class_) + else: + yield any_class_validator + return + + for val_type, validators in _VALIDATORS: + try: + if issubclass(type_, val_type): + for v in validators: + if isinstance(v, IfConfig): + if v.check(config): + yield v.validator + else: + yield v + return + except TypeError: + raise RuntimeError(f'error checking inheritance of {type_!r} (type: {display_as_type(type_)})') + + if config.arbitrary_types_allowed: + yield make_arbitrary_type_validator(type_) + else: + raise RuntimeError(f'no validator found for {type_}, see `arbitrary_types_allowed` in Config') diff --git a/lib/pydantic/version.py b/lib/pydantic/version.py new file mode 100644 index 00000000..32c61633 --- /dev/null +++ b/lib/pydantic/version.py @@ -0,0 +1,38 @@ +__all__ = 'compiled', 'VERSION', 'version_info' + +VERSION = '1.10.2' + +try: + import cython # type: ignore +except ImportError: + compiled: bool = False +else: # pragma: no cover + try: + compiled = cython.compiled + except AttributeError: + compiled = False + + +def version_info() -> str: + import platform + import sys + from importlib import import_module + from pathlib import Path + + optional_deps = [] + for p in ('devtools', 'dotenv', 'email-validator', 'typing-extensions'): + try: + import_module(p.replace('-', '_')) + except ImportError: + continue + optional_deps.append(p) + + info = { + 'pydantic version': VERSION, + 'pydantic compiled': compiled, + 'install path': Path(__file__).resolve().parent, + 'python version': sys.version, + 'platform': platform.platform(), + 'optional deps. installed': optional_deps, + } + return '\n'.join('{:>30} {}'.format(k + ':', str(v).replace('\n', ' ')) for k, v in info.items()) diff --git a/lib/typing_extensions.py b/lib/typing_extensions.py index 9f1c7aa3..ef42417c 100644 --- a/lib/typing_extensions.py +++ b/lib/typing_extensions.py @@ -1,52 +1,28 @@ import abc import collections import collections.abc +import functools import operator import sys +import types as _types import typing -# After PEP 560, internal typing API was substantially reworked. -# This is especially important for Protocol class which uses internal APIs -# quite extensively. -PEP_560 = sys.version_info[:3] >= (3, 7, 0) -if PEP_560: - GenericMeta = type -else: - # 3.6 - from typing import GenericMeta, _type_vars # noqa - -# The two functions below are copies of typing internal helpers. 
-# They are needed by _ProtocolMeta - - -def _no_slots_copy(dct): - dict_copy = dict(dct) - if '__slots__' in dict_copy: - for slot in dict_copy['__slots__']: - dict_copy.pop(slot, None) - return dict_copy - - -def _check_generic(cls, parameters): - if not cls.__parameters__: - raise TypeError(f"{cls} is not a generic class") - alen = len(parameters) - elen = len(cls.__parameters__) - if alen != elen: - raise TypeError(f"Too {'many' if alen > elen else 'few'} arguments for {cls};" - f" actual {alen}, expected {elen}") - - -# Please keep __all__ alphabetized within each category. __all__ = [ # Super-special typing primitives. + 'Any', 'ClassVar', 'Concatenate', 'Final', + 'LiteralString', 'ParamSpec', + 'ParamSpecArgs', + 'ParamSpecKwargs', 'Self', 'Type', + 'TypeVar', + 'TypeVarTuple', + 'Unpack', # ABCs (from collections.abc). 'Awaitable', @@ -62,6 +38,7 @@ __all__ = [ 'Counter', 'Deque', 'DefaultDict', + 'NamedTuple', 'OrderedDict', 'TypedDict', @@ -70,49 +47,101 @@ __all__ = [ # One-off things. 'Annotated', + 'assert_never', + 'assert_type', + 'clear_overloads', + 'dataclass_transform', + 'get_overloads', 'final', + 'get_args', + 'get_origin', + 'get_type_hints', 'IntVar', + 'is_typeddict', 'Literal', 'NewType', 'overload', + 'override', 'Protocol', + 'reveal_type', 'runtime', 'runtime_checkable', 'Text', 'TypeAlias', 'TypeGuard', 'TYPE_CHECKING', + 'Never', + 'NoReturn', + 'Required', + 'NotRequired', ] -if PEP_560: - __all__.extend(["get_args", "get_origin", "get_type_hints"]) +# for backward compatibility +PEP_560 = True +GenericMeta = type -# 3.6.2+ -if hasattr(typing, 'NoReturn'): - NoReturn = typing.NoReturn -# 3.6.0-3.6.1 +# The functions below are modified copies of typing internal helpers. +# They are needed by _ProtocolMeta and they provide support for PEP 646. + +_marker = object() + + +def _check_generic(cls, parameters, elen=_marker): + """Check correct count for parameters of a generic cls (internal helper). + This gives a nice error message in case of count mismatch. + """ + if not elen: + raise TypeError(f"{cls} is not a generic class") + if elen is _marker: + if not hasattr(cls, "__parameters__") or not cls.__parameters__: + raise TypeError(f"{cls} is not a generic class") + elen = len(cls.__parameters__) + alen = len(parameters) + if alen != elen: + if hasattr(cls, "__parameters__"): + parameters = [p for p in cls.__parameters__ if not _is_unpack(p)] + num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters) + if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples): + return + raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};" + f" actual {alen}, expected {elen}") + + +if sys.version_info >= (3, 10): + def _should_collect_from_parameters(t): + return isinstance( + t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType) + ) +elif sys.version_info >= (3, 9): + def _should_collect_from_parameters(t): + return isinstance(t, (typing._GenericAlias, _types.GenericAlias)) else: - class _NoReturn(typing._FinalTypingBase, _root=True): - """Special type indicating functions that never return. - Example:: + def _should_collect_from_parameters(t): + return isinstance(t, typing._GenericAlias) and not t._special - from typing import NoReturn - def stop() -> NoReturn: - raise Exception('no way') +def _collect_type_vars(types, typevar_types=None): + """Collect all type variable contained in types in order of + first appearance (lexicographic order). 
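+    (items for which ``_is_unpack`` is true are not collected directly.)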
For example:: - This type is invalid in other positions, e.g., ``List[NoReturn]`` - will fail in static type checkers. - """ - __slots__ = () + _collect_type_vars((T, List[S, T])) == (T, S) + """ + if typevar_types is None: + typevar_types = typing.TypeVar + tvars = [] + for t in types: + if ( + isinstance(t, typevar_types) and + t not in tvars and + not _is_unpack(t) + ): + tvars.append(t) + if _should_collect_from_parameters(t): + tvars.extend([t for t in t.__parameters__ if t not in tvars]) + return tuple(tvars) - def __instancecheck__(self, obj): - raise TypeError("NoReturn cannot be used with isinstance().") - def __subclasscheck__(self, cls): - raise TypeError("NoReturn cannot be used with issubclass().") - - NoReturn = _NoReturn(_root=True) +NoReturn = typing.NoReturn # Some unconstrained type variables. These are used by the container types. # (These are not for export.) @@ -122,6 +151,37 @@ VT = typing.TypeVar('VT') # Value type. T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. + +if sys.version_info >= (3, 11): + from typing import Any +else: + + class _AnyMeta(type): + def __instancecheck__(self, obj): + if self is Any: + raise TypeError("typing_extensions.Any cannot be used with isinstance()") + return super().__instancecheck__(obj) + + def __repr__(self): + if self is Any: + return "typing_extensions.Any" + return super().__repr__() + + class Any(metaclass=_AnyMeta): + """Special type indicating an unconstrained type. + - Any is compatible with every type. + - Any assumed to have all methods. + - All values assumed to be instances of Any. + Note that all the above statements are true from the point of view of + static type checkers. At runtime, Any should not be used with instance + checks. + """ + def __new__(cls, *args, **kwargs): + if cls is Any: + raise TypeError("Any cannot be instantiated") + return super().__new__(cls, *args, **kwargs) + + ClassVar = typing.ClassVar # On older versions of typing there is an internal class named "Final". @@ -129,7 +189,7 @@ ClassVar = typing.ClassVar if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7): Final = typing.Final # 3.7 -elif sys.version_info[:2] >= (3, 7): +else: class _FinalForm(typing._SpecialForm, _root=True): def __repr__(self): @@ -137,7 +197,7 @@ elif sys.version_info[:2] >= (3, 7): def __getitem__(self, parameters): item = typing._type_check(parameters, - f'{self._name} accepts only single type') + f'{self._name} accepts only a single type.') return typing._GenericAlias(self, (item,)) Final = _FinalForm('Final', @@ -154,67 +214,13 @@ elif sys.version_info[:2] >= (3, 7): TIMEOUT = 1 # Error reported by type checker There is no runtime checking of these properties.""") -# 3.6 -else: - class _Final(typing._FinalTypingBase, _root=True): - """A special typing construct to indicate that a name - cannot be re-assigned or overridden in a subclass. - For example: - MAX_SIZE: Final = 9000 - MAX_SIZE += 1 # Error reported by type checker - - class Connection: - TIMEOUT: Final[int] = 10 - class FastConnector(Connection): - TIMEOUT = 1 # Error reported by type checker - - There is no runtime checking of these properties. 
- """ - - __slots__ = ('__type__',) - - def __init__(self, tp=None, **kwds): - self.__type__ = tp - - def __getitem__(self, item): - cls = type(self) - if self.__type__ is None: - return cls(typing._type_check(item, - f'{cls.__name__[1:]} accepts only single type.'), - _root=True) - raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted') - - def _eval_type(self, globalns, localns): - new_tp = typing._eval_type(self.__type__, globalns, localns) - if new_tp == self.__type__: - return self - return type(self)(new_tp, _root=True) - - def __repr__(self): - r = super().__repr__() - if self.__type__ is not None: - r += f'[{typing._type_repr(self.__type__)}]' - return r - - def __hash__(self): - return hash((type(self).__name__, self.__type__)) - - def __eq__(self, other): - if not isinstance(other, _Final): - return NotImplemented - if self.__type__ is not None: - return self.__type__ == other.__type__ - return self is other - - Final = _Final(_root=True) - - -# 3.8+ -if hasattr(typing, 'final'): +if sys.version_info >= (3, 11): final = typing.final -# 3.6-3.7 else: + # @final exists in 3.8+, but we backport it for all versions + # before 3.11 to keep support for the __final__ attribute. + # See https://bugs.python.org/issue46342 def final(f): """This decorator can be used to indicate to type checkers that the decorated method cannot be overridden, and decorated class @@ -233,8 +239,17 @@ else: class Other(Leaf): # Error reported by type checker ... - There is no runtime checking of these properties. + There is no runtime checking of these properties. The decorator + sets the ``__final__`` attribute to ``True`` on the decorated object + to allow runtime introspection. """ + try: + f.__final__ = True + except (AttributeError, TypeError): + # Skip the attribute silently if it is not writable. + # AttributeError happens if the object has __slots__ or a + # read-only property, TypeError if it's a builtin class. + pass return f @@ -246,7 +261,7 @@ def IntVar(name): if hasattr(typing, 'Literal'): Literal = typing.Literal # 3.7: -elif sys.version_info[:2] >= (3, 7): +else: class _LiteralForm(typing._SpecialForm, _root=True): def __repr__(self): @@ -268,59 +283,75 @@ elif sys.version_info[:2] >= (3, 7): Literal[...] cannot be subclassed. There is no runtime checking verifying that the parameter is actually a value instead of a type.""") -# 3.6: -else: - class _Literal(typing._FinalTypingBase, _root=True): - """A type that can be used to indicate to type checkers that the - corresponding value has a value literally equivalent to the - provided parameter. For example: - - var: Literal[4] = 4 - - The type checker understands that 'var' is literally equal to the - value 4 and no other value. - - Literal[...] cannot be subclassed. There is no runtime checking - verifying that the parameter is actually a value instead of a type. 
- """ - - __slots__ = ('__values__',) - - def __init__(self, values=None, **kwds): - self.__values__ = values - - def __getitem__(self, values): - cls = type(self) - if self.__values__ is None: - if not isinstance(values, tuple): - values = (values,) - return cls(values, _root=True) - raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted') - - def _eval_type(self, globalns, localns): - return self - - def __repr__(self): - r = super().__repr__() - if self.__values__ is not None: - r += f'[{", ".join(map(typing._type_repr, self.__values__))}]' - return r - - def __hash__(self): - return hash((type(self).__name__, self.__values__)) - - def __eq__(self, other): - if not isinstance(other, _Literal): - return NotImplemented - if self.__values__ is not None: - return self.__values__ == other.__values__ - return self is other - - Literal = _Literal(_root=True) _overload_dummy = typing._overload_dummy # noqa -overload = typing.overload + + +if hasattr(typing, "get_overloads"): # 3.11+ + overload = typing.overload + get_overloads = typing.get_overloads + clear_overloads = typing.clear_overloads +else: + # {module: {qualname: {firstlineno: func}}} + _overload_registry = collections.defaultdict( + functools.partial(collections.defaultdict, dict) + ) + + def overload(func): + """Decorator for overloaded functions/methods. + + In a stub file, place two or more stub definitions for the same + function in a row, each decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + + In a non-stub file (i.e. a regular .py file), do the same but + follow it with an implementation. The implementation should *not* + be decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + def utf8(value): + # implementation goes here + + The overloads for a function can be retrieved at runtime using the + get_overloads() function. + """ + # classmethod and staticmethod + f = getattr(func, "__func__", func) + try: + _overload_registry[f.__module__][f.__qualname__][ + f.__code__.co_firstlineno + ] = func + except AttributeError: + # Not a normal function; ignore. + pass + return _overload_dummy + + def get_overloads(func): + """Return all defined overloads for *func* as a sequence.""" + # classmethod and staticmethod + f = getattr(func, "__func__", func) + if f.__module__ not in _overload_registry: + return [] + mod_dict = _overload_registry[f.__module__] + if f.__qualname__ not in mod_dict: + return [] + return list(mod_dict[f.__qualname__].values()) + + def clear_overloads(): + """Clear all overloads in the registry.""" + _overload_registry.clear() # This is not a real generic class. Don't use outside annotations. @@ -330,154 +361,30 @@ Type = typing.Type # A few are simply re-exported for completeness. -class _ExtensionsGenericMeta(GenericMeta): - def __subclasscheck__(self, subclass): - """This mimics a more modern GenericMeta.__subclasscheck__() logic - (that does not have problems with recursion) to work around interactions - between collections, typing, and typing_extensions on older - versions of Python, see https://github.com/python/typing/issues/501. 
- """ - if self.__origin__ is not None: - if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']: - raise TypeError("Parameterized generics cannot be used with class " - "or instance checks") - return False - if not self.__extra__: - return super().__subclasscheck__(subclass) - res = self.__extra__.__subclasshook__(subclass) - if res is not NotImplemented: - return res - if self.__extra__ in subclass.__mro__: - return True - for scls in self.__extra__.__subclasses__(): - if isinstance(scls, GenericMeta): - continue - if issubclass(subclass, scls): - return True - return False - - Awaitable = typing.Awaitable Coroutine = typing.Coroutine AsyncIterable = typing.AsyncIterable AsyncIterator = typing.AsyncIterator - -# 3.6.1+ -if hasattr(typing, 'Deque'): - Deque = typing.Deque -# 3.6.0 -else: - class Deque(collections.deque, typing.MutableSequence[T], - metaclass=_ExtensionsGenericMeta, - extra=collections.deque): - __slots__ = () - - def __new__(cls, *args, **kwds): - if cls._gorg is Deque: - return collections.deque(*args, **kwds) - return typing._generic_new(collections.deque, cls, *args, **kwds) - +Deque = typing.Deque ContextManager = typing.ContextManager -# 3.6.2+ -if hasattr(typing, 'AsyncContextManager'): - AsyncContextManager = typing.AsyncContextManager -# 3.6.0-3.6.1 -else: - from _collections_abc import _check_methods as _check_methods_in_mro # noqa - - class AsyncContextManager(typing.Generic[T_co]): - __slots__ = () - - async def __aenter__(self): - return self - - @abc.abstractmethod - async def __aexit__(self, exc_type, exc_value, traceback): - return None - - @classmethod - def __subclasshook__(cls, C): - if cls is AsyncContextManager: - return _check_methods_in_mro(C, "__aenter__", "__aexit__") - return NotImplemented - +AsyncContextManager = typing.AsyncContextManager DefaultDict = typing.DefaultDict # 3.7.2+ if hasattr(typing, 'OrderedDict'): OrderedDict = typing.OrderedDict # 3.7.0-3.7.2 -elif (3, 7, 0) <= sys.version_info[:3] < (3, 7, 2): +else: OrderedDict = typing._alias(collections.OrderedDict, (KT, VT)) -# 3.6 -else: - class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT], - metaclass=_ExtensionsGenericMeta, - extra=collections.OrderedDict): - - __slots__ = () - - def __new__(cls, *args, **kwds): - if cls._gorg is OrderedDict: - return collections.OrderedDict(*args, **kwds) - return typing._generic_new(collections.OrderedDict, cls, *args, **kwds) - -# 3.6.2+ -if hasattr(typing, 'Counter'): - Counter = typing.Counter -# 3.6.0-3.6.1 -else: - class Counter(collections.Counter, - typing.Dict[T, int], - metaclass=_ExtensionsGenericMeta, extra=collections.Counter): - - __slots__ = () - - def __new__(cls, *args, **kwds): - if cls._gorg is Counter: - return collections.Counter(*args, **kwds) - return typing._generic_new(collections.Counter, cls, *args, **kwds) - -# 3.6.1+ -if hasattr(typing, 'ChainMap'): - ChainMap = typing.ChainMap -elif hasattr(collections, 'ChainMap'): - class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT], - metaclass=_ExtensionsGenericMeta, - extra=collections.ChainMap): - - __slots__ = () - - def __new__(cls, *args, **kwds): - if cls._gorg is ChainMap: - return collections.ChainMap(*args, **kwds) - return typing._generic_new(collections.ChainMap, cls, *args, **kwds) - -# 3.6.1+ -if hasattr(typing, 'AsyncGenerator'): - AsyncGenerator = typing.AsyncGenerator -# 3.6.0 -else: - class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra], - metaclass=_ExtensionsGenericMeta, - 
extra=collections.abc.AsyncGenerator): - __slots__ = () +Counter = typing.Counter +ChainMap = typing.ChainMap +AsyncGenerator = typing.AsyncGenerator NewType = typing.NewType Text = typing.Text TYPE_CHECKING = typing.TYPE_CHECKING -def _gorg(cls): - """This function exists for compatibility with old typing versions.""" - assert isinstance(cls, GenericMeta) - if hasattr(cls, '_gorg'): - return cls._gorg - while cls.__origin__ is not None: - cls = cls.__origin__ - return cls - - _PROTO_WHITELIST = ['Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator', 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', @@ -507,18 +414,57 @@ def _is_callable_members_only(cls): return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls)) +def _maybe_adjust_parameters(cls): + """Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__. + + The contents of this function are very similar + to logic found in typing.Generic.__init_subclass__ + on the CPython main branch. + """ + tvars = [] + if '__orig_bases__' in cls.__dict__: + tvars = typing._collect_type_vars(cls.__orig_bases__) + # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn]. + # If found, tvars must be a subset of it. + # If not found, tvars is it. + # Also check for and reject plain Generic, + # and reject multiple Generic[...] and/or Protocol[...]. + gvars = None + for base in cls.__orig_bases__: + if (isinstance(base, typing._GenericAlias) and + base.__origin__ in (typing.Generic, Protocol)): + # for error messages + the_base = base.__origin__.__name__ + if gvars is not None: + raise TypeError( + "Cannot inherit from Generic[...]" + " and/or Protocol[...] multiple types.") + gvars = base.__parameters__ + if gvars is None: + gvars = tvars + else: + tvarset = set(tvars) + gvarset = set(gvars) + if not tvarset <= gvarset: + s_vars = ', '.join(str(t) for t in tvars if t not in gvarset) + s_args = ', '.join(str(g) for g in gvars) + raise TypeError(f"Some type variables ({s_vars}) are" + f" not listed in {the_base}[{s_args}]") + tvars = gvars + cls.__parameters__ = tuple(tvars) + + # 3.8+ if hasattr(typing, 'Protocol'): Protocol = typing.Protocol # 3.7 -elif PEP_560: - from typing import _collect_type_vars # noqa +else: def _no_init(self, *args, **kwargs): if type(self)._is_protocol: raise TypeError('Protocols cannot be instantiated') - class _ProtocolMeta(abc.ABCMeta): + class _ProtocolMeta(abc.ABCMeta): # noqa: B024 # This metaclass is a bit unfortunate and exists only because of the lack # of __instancehook__. def __instancecheck__(cls, instance): @@ -600,47 +546,17 @@ elif PEP_560: "Parameters to Protocol[...] must all be unique") else: # Subscripting a regular Generic subclass. - _check_generic(cls, params) + _check_generic(cls, params, len(cls.__parameters__)) return typing._GenericAlias(cls, params) def __init_subclass__(cls, *args, **kwargs): - tvars = [] if '__orig_bases__' in cls.__dict__: error = typing.Generic in cls.__orig_bases__ else: error = typing.Generic in cls.__bases__ if error: raise TypeError("Cannot inherit from plain Generic") - if '__orig_bases__' in cls.__dict__: - tvars = _collect_type_vars(cls.__orig_bases__) - # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn]. - # If found, tvars must be a subset of it. - # If not found, tvars is it. - # Also check for and reject plain Generic, - # and reject multiple Generic[...] and/or Protocol[...]. 
- gvars = None - for base in cls.__orig_bases__: - if (isinstance(base, typing._GenericAlias) and - base.__origin__ in (typing.Generic, Protocol)): - # for error messages - the_base = base.__origin__.__name__ - if gvars is not None: - raise TypeError( - "Cannot inherit from Generic[...]" - " and/or Protocol[...] multiple types.") - gvars = base.__parameters__ - if gvars is None: - gvars = tvars - else: - tvarset = set(tvars) - gvarset = set(gvars) - if not tvarset <= gvarset: - s_vars = ', '.join(str(t) for t in tvars if t not in gvarset) - s_args = ', '.join(str(g) for g in gvars) - raise TypeError(f"Some type variables ({s_vars}) are" - f" not listed in {the_base}[{s_args}]") - tvars = gvars - cls.__parameters__ = tuple(tvars) + _maybe_adjust_parameters(cls) # Determine if this is a protocol or a concrete subclass. if not cls.__dict__.get('_is_protocol', None): @@ -694,250 +610,12 @@ elif PEP_560: raise TypeError('Protocols can only inherit from other' f' protocols, got {repr(base)}') cls.__init__ = _no_init -# 3.6 -else: - from typing import _next_in_mro, _type_check # noqa - - def _no_init(self, *args, **kwargs): - if type(self)._is_protocol: - raise TypeError('Protocols cannot be instantiated') - - class _ProtocolMeta(GenericMeta): - """Internal metaclass for Protocol. - - This exists so Protocol classes can be generic without deriving - from Generic. - """ - def __new__(cls, name, bases, namespace, - tvars=None, args=None, origin=None, extra=None, orig_bases=None): - # This is just a version copied from GenericMeta.__new__ that - # includes "Protocol" special treatment. (Comments removed for brevity.) - assert extra is None # Protocols should not have extra - if tvars is not None: - assert origin is not None - assert all(isinstance(t, typing.TypeVar) for t in tvars), tvars - else: - tvars = _type_vars(bases) - gvars = None - for base in bases: - if base is typing.Generic: - raise TypeError("Cannot inherit from plain Generic") - if (isinstance(base, GenericMeta) and - base.__origin__ in (typing.Generic, Protocol)): - if gvars is not None: - raise TypeError( - "Cannot inherit from Generic[...] or" - " Protocol[...] multiple times.") - gvars = base.__parameters__ - if gvars is None: - gvars = tvars - else: - tvarset = set(tvars) - gvarset = set(gvars) - if not tvarset <= gvarset: - s_vars = ", ".join(str(t) for t in tvars if t not in gvarset) - s_args = ", ".join(str(g) for g in gvars) - cls_name = "Generic" if any(b.__origin__ is typing.Generic - for b in bases) else "Protocol" - raise TypeError(f"Some type variables ({s_vars}) are" - f" not listed in {cls_name}[{s_args}]") - tvars = gvars - - initial_bases = bases - if (extra is not None and type(extra) is abc.ABCMeta and - extra not in bases): - bases = (extra,) + bases - bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b - for b in bases) - if any(isinstance(b, GenericMeta) and b is not typing.Generic for b in bases): - bases = tuple(b for b in bases if b is not typing.Generic) - namespace.update({'__origin__': origin, '__extra__': extra}) - self = super(GenericMeta, cls).__new__(cls, name, bases, namespace, - _root=True) - super(GenericMeta, self).__setattr__('_gorg', - self if not origin else - _gorg(origin)) - self.__parameters__ = tvars - self.__args__ = tuple(... 
if a is typing._TypingEllipsis else - () if a is typing._TypingEmpty else - a for a in args) if args else None - self.__next_in_mro__ = _next_in_mro(self) - if orig_bases is None: - self.__orig_bases__ = initial_bases - elif origin is not None: - self._abc_registry = origin._abc_registry - self._abc_cache = origin._abc_cache - if hasattr(self, '_subs_tree'): - self.__tree_hash__ = (hash(self._subs_tree()) if origin else - super(GenericMeta, self).__hash__()) - return self - - def __init__(cls, *args, **kwargs): - super().__init__(*args, **kwargs) - if not cls.__dict__.get('_is_protocol', None): - cls._is_protocol = any(b is Protocol or - isinstance(b, _ProtocolMeta) and - b.__origin__ is Protocol - for b in cls.__bases__) - if cls._is_protocol: - for base in cls.__mro__[1:]: - if not (base in (object, typing.Generic) or - base.__module__ == 'collections.abc' and - base.__name__ in _PROTO_WHITELIST or - isinstance(base, typing.TypingMeta) and base._is_protocol or - isinstance(base, GenericMeta) and - base.__origin__ is typing.Generic): - raise TypeError(f'Protocols can only inherit from other' - f' protocols, got {repr(base)}') - - cls.__init__ = _no_init - - def _proto_hook(other): - if not cls.__dict__.get('_is_protocol', None): - return NotImplemented - if not isinstance(other, type): - # Same error as for issubclass(1, int) - raise TypeError('issubclass() arg 1 must be a class') - for attr in _get_protocol_attrs(cls): - for base in other.__mro__: - if attr in base.__dict__: - if base.__dict__[attr] is None: - return NotImplemented - break - annotations = getattr(base, '__annotations__', {}) - if (isinstance(annotations, typing.Mapping) and - attr in annotations and - isinstance(other, _ProtocolMeta) and - other._is_protocol): - break - else: - return NotImplemented - return True - if '__subclasshook__' not in cls.__dict__: - cls.__subclasshook__ = _proto_hook - - def __instancecheck__(self, instance): - # We need this method for situations where attributes are - # assigned in __init__. - if ((not getattr(self, '_is_protocol', False) or - _is_callable_members_only(self)) and - issubclass(instance.__class__, self)): - return True - if self._is_protocol: - if all(hasattr(instance, attr) and - (not callable(getattr(self, attr, None)) or - getattr(instance, attr) is not None) - for attr in _get_protocol_attrs(self)): - return True - return super(GenericMeta, self).__instancecheck__(instance) - - def __subclasscheck__(self, cls): - if self.__origin__ is not None: - if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']: - raise TypeError("Parameterized generics cannot be used with class " - "or instance checks") - return False - if (self.__dict__.get('_is_protocol', None) and - not self.__dict__.get('_is_runtime_protocol', None)): - if sys._getframe(1).f_globals['__name__'] in ['abc', - 'functools', - 'typing']: - return False - raise TypeError("Instance and class checks can only be used with" - " @runtime protocols") - if (self.__dict__.get('_is_runtime_protocol', None) and - not _is_callable_members_only(self)): - if sys._getframe(1).f_globals['__name__'] in ['abc', - 'functools', - 'typing']: - return super(GenericMeta, self).__subclasscheck__(cls) - raise TypeError("Protocols with non-method members" - " don't support issubclass()") - return super(GenericMeta, self).__subclasscheck__(cls) - - @typing._tp_cache - def __getitem__(self, params): - # We also need to copy this from GenericMeta.__getitem__ to get - # special treatment of "Protocol". 
(Comments removed for brevity.) - if not isinstance(params, tuple): - params = (params,) - if not params and _gorg(self) is not typing.Tuple: - raise TypeError( - f"Parameter list to {self.__qualname__}[...] cannot be empty") - msg = "Parameters to generic types must be types." - params = tuple(_type_check(p, msg) for p in params) - if self in (typing.Generic, Protocol): - if not all(isinstance(p, typing.TypeVar) for p in params): - raise TypeError( - f"Parameters to {repr(self)}[...] must all be type variables") - if len(set(params)) != len(params): - raise TypeError( - f"Parameters to {repr(self)}[...] must all be unique") - tvars = params - args = params - elif self in (typing.Tuple, typing.Callable): - tvars = _type_vars(params) - args = params - elif self.__origin__ in (typing.Generic, Protocol): - raise TypeError(f"Cannot subscript already-subscripted {repr(self)}") - else: - _check_generic(self, params) - tvars = _type_vars(params) - args = params - - prepend = (self,) if self.__origin__ is None else () - return self.__class__(self.__name__, - prepend + self.__bases__, - _no_slots_copy(self.__dict__), - tvars=tvars, - args=args, - origin=self, - extra=self.__extra__, - orig_bases=self.__orig_bases__) - - class Protocol(metaclass=_ProtocolMeta): - """Base class for protocol classes. Protocol classes are defined as:: - - class Proto(Protocol): - def meth(self) -> int: - ... - - Such classes are primarily used with static type checkers that recognize - structural subtyping (static duck-typing), for example:: - - class C: - def meth(self) -> int: - return 0 - - def func(x: Proto) -> int: - return x.meth() - - func(C()) # Passes static type check - - See PEP 544 for details. Protocol classes decorated with - @typing_extensions.runtime act as simple-minded runtime protocol that checks - only the presence of given attributes, ignoring their type signatures. - - Protocol classes can be generic, they are defined as:: - - class GenProto(Protocol[T]): - def meth(self) -> T: - ... - """ - __slots__ = () - _is_protocol = True - - def __new__(cls, *args, **kwds): - if _gorg(cls) is Protocol: - raise TypeError("Type Protocol cannot be instantiated; " - "it can be used only as a base class") - return typing._generic_new(cls.__next_in_mro__, cls, *args, **kwds) # 3.8+ if hasattr(typing, 'runtime_checkable'): runtime_checkable = typing.runtime_checkable -# 3.6-3.7 +# 3.7 else: def runtime_checkable(cls): """Mark a protocol class as a runtime protocol, so that it @@ -961,7 +639,7 @@ runtime = runtime_checkable # 3.8+ if hasattr(typing, 'SupportsIndex'): SupportsIndex = typing.SupportsIndex -# 3.6-3.7 +# 3.7 else: @runtime_checkable class SupportsIndex(Protocol): @@ -972,12 +650,17 @@ else: pass -if sys.version_info >= (3, 9, 2): +if hasattr(typing, "Required"): # The standard library TypedDict in Python 3.8 does not store runtime information # about which (if any) keys are optional. See https://bugs.python.org/issue38834 # The standard library TypedDict in Python 3.9.0/1 does not honour the "total" # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059 + # The standard library TypedDict below Python 3.11 does not store runtime + # information about optional and required keys when using Required or NotRequired. + # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11. 
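+    # Illustrative example (hypothetical `Movie` class): on 3.11+ the stdlib tracks
+    # Required/NotRequired at runtime:
+    #
+    #     class Movie(TypedDict):
+    #         title: Required[str]
+    #         year: NotRequired[int]
+    #
+    #     assert Movie.__required_keys__ == frozenset({'title'})
+    #     assert Movie.__optional_keys__ == frozenset({'year'})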
TypedDict = typing.TypedDict + _TypedDictMeta = typing._TypedDictMeta + is_typeddict = typing.is_typeddict else: def _check_fails(cls, other): try: @@ -1057,11 +740,18 @@ else: # Subclasses and instances of TypedDict return actual dictionaries # via _dict_new. ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new + # Don't insert typing.Generic into __bases__ here, + # or Generic.__init_subclass__ will raise TypeError + # in the super().__new__() call. + # Instead, monkey-patch __bases__ onto the class after it's been created. tp_dict = super().__new__(cls, name, (dict,), ns) + if any(issubclass(base, typing.Generic) for base in bases): + tp_dict.__bases__ = (typing.Generic, dict) + _maybe_adjust_parameters(tp_dict) + annotations = {} own_annotations = ns.get('__annotations__', {}) - own_annotation_keys = set(own_annotations.keys()) msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" own_annotations = { n: typing._type_check(tp, msg) for n, tp in own_annotations.items() @@ -1075,10 +765,22 @@ else: optional_keys.update(base.__dict__.get('__optional_keys__', ())) annotations.update(own_annotations) - if total: - required_keys.update(own_annotation_keys) - else: - optional_keys.update(own_annotation_keys) + for annotation_key, annotation_type in own_annotations.items(): + annotation_origin = get_origin(annotation_type) + if annotation_origin is Annotated: + annotation_args = get_args(annotation_type) + if annotation_args: + annotation_type = annotation_args[0] + annotation_origin = get_origin(annotation_type) + + if annotation_origin is Required: + required_keys.add(annotation_key) + elif annotation_origin is NotRequired: + optional_keys.add(annotation_key) + elif total: + required_keys.add(annotation_key) + else: + optional_keys.add(annotation_key) tp_dict.__annotations__ = annotations tp_dict.__required_keys__ = frozenset(required_keys) @@ -1121,16 +823,127 @@ else: syntax forms work for Python 2.7 and 3.2+ """ + if hasattr(typing, "_TypedDictMeta"): + _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta) + else: + _TYPEDDICT_TYPES = (_TypedDictMeta,) -# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints) + def is_typeddict(tp): + """Check if an annotation is a TypedDict class + + For example:: + class Film(TypedDict): + title: str + year: int + + is_typeddict(Film) # => True + is_typeddict(Union[list, str]) # => False + """ + return isinstance(tp, tuple(_TYPEDDICT_TYPES)) + + +if hasattr(typing, "assert_type"): + assert_type = typing.assert_type + +else: + def assert_type(__val, __typ): + """Assert (to the type checker) that the value is of the given type. + + When the type checker encounters a call to assert_type(), it + emits an error if the value is not of the specified type:: + + def greet(name: str) -> None: + assert_type(name, str) # ok + assert_type(name, int) # type checker error + + At runtime this returns the first argument unchanged and otherwise + does nothing. 
+ """ + return __val + + +if hasattr(typing, "Required"): + get_type_hints = typing.get_type_hints +else: + import functools + import types + + # replaces _strip_annotations() + def _strip_extras(t): + """Strips Annotated, Required and NotRequired from a given type.""" + if isinstance(t, _AnnotatedAlias): + return _strip_extras(t.__origin__) + if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired): + return _strip_extras(t.__args__[0]) + if isinstance(t, typing._GenericAlias): + stripped_args = tuple(_strip_extras(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return t.copy_with(stripped_args) + if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias): + stripped_args = tuple(_strip_extras(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return types.GenericAlias(t.__origin__, stripped_args) + if hasattr(types, "UnionType") and isinstance(t, types.UnionType): + stripped_args = tuple(_strip_extras(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return functools.reduce(operator.or_, stripped_args) + + return t + + def get_type_hints(obj, globalns=None, localns=None, include_extras=False): + """Return type hints for an object. + + This is often the same as obj.__annotations__, but it handles + forward references encoded as string literals, adds Optional[t] if a + default value equal to None is set and recursively replaces all + 'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T' + (unless 'include_extras=True'). + + The argument may be a module, class, method, or function. The annotations + are returned as a dictionary. For classes, annotations include also + inherited members. + + TypeError is raised if the argument is not of a type that can contain + annotations, and an empty dictionary is returned if no annotations are + present. + + BEWARE -- the behavior of globalns and localns is counterintuitive + (unless you are familiar with how eval() and exec() work). The + search order is locals first, then globals. + + - If no dict arguments are passed, an attempt is made to use the + globals from obj (or the respective module's globals for classes), + and these are also used as the locals. If the object does not appear + to have globals, an empty dictionary is used. + + - If one dict argument is passed, it is used for both globals and + locals. + + - If two dict arguments are passed, they specify globals and + locals, respectively. + """ + if hasattr(typing, "Annotated"): + hint = typing.get_type_hints( + obj, globalns=globalns, localns=localns, include_extras=True + ) + else: + hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) + if include_extras: + return hint + return {k: _strip_extras(t) for k, t in hint.items()} + + +# Python 3.9+ has PEP 593 (Annotated) if hasattr(typing, 'Annotated'): Annotated = typing.Annotated - get_type_hints = typing.get_type_hints # Not exported and not a public API, but needed for get_origin() and get_args() # to work. _AnnotatedAlias = typing._AnnotatedAlias # 3.7-3.8 -elif PEP_560: +else: class _AnnotatedAlias(typing._GenericAlias, _root=True): """Runtime representation of an annotated type. @@ -1214,8 +1027,12 @@ elif PEP_560: raise TypeError("Annotated[...] should be used " "with at least two arguments (a type and an " "annotation).") - msg = "Annotated[t, ...]: t must be a type." 
- origin = typing._type_check(params[0], msg) + allowed_special_forms = (ClassVar, Final) + if get_origin(params[0]) in allowed_special_forms: + origin = params[0] + else: + msg = "Annotated[t, ...]: t must be a type." + origin = typing._type_check(params[0], msg) metadata = tuple(params[1:]) return _AnnotatedAlias(origin, metadata) @@ -1224,196 +1041,6 @@ elif PEP_560: f"Cannot subclass {cls.__module__}.Annotated" ) - def _strip_annotations(t): - """Strips the annotations from a given type. - """ - if isinstance(t, _AnnotatedAlias): - return _strip_annotations(t.__origin__) - if isinstance(t, typing._GenericAlias): - stripped_args = tuple(_strip_annotations(a) for a in t.__args__) - if stripped_args == t.__args__: - return t - res = t.copy_with(stripped_args) - res._special = t._special - return res - return t - - def get_type_hints(obj, globalns=None, localns=None, include_extras=False): - """Return type hints for an object. - - This is often the same as obj.__annotations__, but it handles - forward references encoded as string literals, adds Optional[t] if a - default value equal to None is set and recursively replaces all - 'Annotated[T, ...]' with 'T' (unless 'include_extras=True'). - - The argument may be a module, class, method, or function. The annotations - are returned as a dictionary. For classes, annotations include also - inherited members. - - TypeError is raised if the argument is not of a type that can contain - annotations, and an empty dictionary is returned if no annotations are - present. - - BEWARE -- the behavior of globalns and localns is counterintuitive - (unless you are familiar with how eval() and exec() work). The - search order is locals first, then globals. - - - If no dict arguments are passed, an attempt is made to use the - globals from obj (or the respective module's globals for classes), - and these are also used as the locals. If the object does not appear - to have globals, an empty dictionary is used. - - - If one dict argument is passed, it is used for both globals and - locals. - - - If two dict arguments are passed, they specify globals and - locals, respectively. - """ - hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) - if include_extras: - return hint - return {k: _strip_annotations(t) for k, t in hint.items()} -# 3.6 -else: - - def _is_dunder(name): - """Returns True if name is a __dunder_variable_name__.""" - return len(name) > 4 and name.startswith('__') and name.endswith('__') - - # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality - # checks, argument expansion etc. are done on the _subs_tre. As a result we - # can't provide a get_type_hints function that strips out annotations. 
- - class AnnotatedMeta(typing.GenericMeta): - """Metaclass for Annotated""" - - def __new__(cls, name, bases, namespace, **kwargs): - if any(b is not object for b in bases): - raise TypeError("Cannot subclass " + str(Annotated)) - return super().__new__(cls, name, bases, namespace, **kwargs) - - @property - def __metadata__(self): - return self._subs_tree()[2] - - def _tree_repr(self, tree): - cls, origin, metadata = tree - if not isinstance(origin, tuple): - tp_repr = typing._type_repr(origin) - else: - tp_repr = origin[0]._tree_repr(origin) - metadata_reprs = ", ".join(repr(arg) for arg in metadata) - return f'{cls}[{tp_repr}, {metadata_reprs}]' - - def _subs_tree(self, tvars=None, args=None): # noqa - if self is Annotated: - return Annotated - res = super()._subs_tree(tvars=tvars, args=args) - # Flatten nested Annotated - if isinstance(res[1], tuple) and res[1][0] is Annotated: - sub_tp = res[1][1] - sub_annot = res[1][2] - return (Annotated, sub_tp, sub_annot + res[2]) - return res - - def _get_cons(self): - """Return the class used to create instance of this type.""" - if self.__origin__ is None: - raise TypeError("Cannot get the underlying type of a " - "non-specialized Annotated type.") - tree = self._subs_tree() - while isinstance(tree, tuple) and tree[0] is Annotated: - tree = tree[1] - if isinstance(tree, tuple): - return tree[0] - else: - return tree - - @typing._tp_cache - def __getitem__(self, params): - if not isinstance(params, tuple): - params = (params,) - if self.__origin__ is not None: # specializing an instantiated type - return super().__getitem__(params) - elif not isinstance(params, tuple) or len(params) < 2: - raise TypeError("Annotated[...] should be instantiated " - "with at least two arguments (a type and an " - "annotation).") - else: - msg = "Annotated[t, ...]: t must be a type." - tp = typing._type_check(params[0], msg) - metadata = tuple(params[1:]) - return self.__class__( - self.__name__, - self.__bases__, - _no_slots_copy(self.__dict__), - tvars=_type_vars((tp,)), - # Metadata is a tuple so it won't be touched by _replace_args et al. - args=(tp, metadata), - origin=self, - ) - - def __call__(self, *args, **kwargs): - cons = self._get_cons() - result = cons(*args, **kwargs) - try: - result.__orig_class__ = self - except AttributeError: - pass - return result - - def __getattr__(self, attr): - # For simplicity we just don't relay all dunder names - if self.__origin__ is not None and not _is_dunder(attr): - return getattr(self._get_cons(), attr) - raise AttributeError(attr) - - def __setattr__(self, attr, value): - if _is_dunder(attr) or attr.startswith('_abc_'): - super().__setattr__(attr, value) - elif self.__origin__ is None: - raise AttributeError(attr) - else: - setattr(self._get_cons(), attr, value) - - def __instancecheck__(self, obj): - raise TypeError("Annotated cannot be used with isinstance().") - - def __subclasscheck__(self, cls): - raise TypeError("Annotated cannot be used with issubclass().") - - class Annotated(metaclass=AnnotatedMeta): - """Add context specific metadata to a type. - - Example: Annotated[int, runtime_check.Unsigned] indicates to the - hypothetical runtime_check module that this type is an unsigned int. - Every other consumer of this type can ignore this metadata and treat - this type as int. - - The first argument to Annotated must be a valid type, the remaining - arguments are kept as a tuple in the __metadata__ field. - - Details: - - - It's an error to call `Annotated` with less than two arguments. 
- - Nested Annotated are flattened:: - - Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] - - - Instantiating an annotated type is equivalent to instantiating the - underlying type:: - - Annotated[C, Ann1](5) == C(5) - - - Annotated can be used as a generic type alias:: - - Optimized = Annotated[T, runtime.Optimize()] - Optimized[int] == Annotated[int, runtime.Optimize()] - - OptimizedList = Annotated[List[T], runtime.Optimize()] - OptimizedList[int] == Annotated[List[int], runtime.Optimize()] - """ - # Python 3.8 has get_origin() and get_args() but those implementations aren't # Annotated-aware, so we can't use those. Python 3.9's versions don't support # ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do. @@ -1421,7 +1048,7 @@ if sys.version_info[:2] >= (3, 10): get_origin = typing.get_origin get_args = typing.get_args # 3.7-3.9 -elif PEP_560: +else: try: # 3.9+ from typing import _BaseGenericAlias @@ -1429,9 +1056,9 @@ elif PEP_560: _BaseGenericAlias = typing._GenericAlias try: # 3.9+ - from typing import GenericAlias + from typing import GenericAlias as _typing_GenericAlias except ImportError: - GenericAlias = typing._GenericAlias + _typing_GenericAlias = typing._GenericAlias def get_origin(tp): """Get the unsubscripted version of a type. @@ -1450,7 +1077,7 @@ elif PEP_560: """ if isinstance(tp, _AnnotatedAlias): return Annotated - if isinstance(tp, (typing._GenericAlias, GenericAlias, _BaseGenericAlias, + if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias, ParamSpecArgs, ParamSpecKwargs)): return tp.__origin__ if tp is typing.Generic: @@ -1470,7 +1097,7 @@ elif PEP_560: """ if isinstance(tp, _AnnotatedAlias): return (tp.__origin__,) + tp.__metadata__ - if isinstance(tp, (typing._GenericAlias, GenericAlias)): + if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)): if getattr(tp, "_special", False): return () res = tp.__args__ @@ -1503,7 +1130,7 @@ elif sys.version_info[:2] >= (3, 9): """ raise TypeError(f"{self} is not subscriptable") # 3.7-3.8 -elif sys.version_info[:2] >= (3, 7): +else: class _TypeAliasForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name @@ -1519,44 +1146,51 @@ elif sys.version_info[:2] >= (3, 7): It's invalid when used anywhere except as in the example above.""") -# 3.6 -else: - class _TypeAliasMeta(typing.TypingMeta): - """Metaclass for TypeAlias""" - def __repr__(self): - return 'typing_extensions.TypeAlias' - class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True): - """Special marker indicating that an assignment should - be recognized as a proper type alias definition by type - checkers. +class _DefaultMixin: + """Mixin for TypeVarLike defaults.""" - For example:: + __slots__ = () - Predicate: TypeAlias = Callable[..., bool] + def __init__(self, default): + if isinstance(default, (tuple, list)): + self.__default__ = tuple((typing._type_check(d, "Default must be a type") + for d in default)) + elif default: + self.__default__ = typing._type_check(default, "Default must be a type") + else: + self.__default__ = None - It's invalid when used anywhere except as in the example above. 
- """ - __slots__ = () - def __instancecheck__(self, obj): - raise TypeError("TypeAlias cannot be used with isinstance().") +# Add default and infer_variance parameters from PEP 696 and 695 +class TypeVar(typing.TypeVar, _DefaultMixin, _root=True): + """Type variable.""" - def __subclasscheck__(self, cls): - raise TypeError("TypeAlias cannot be used with issubclass().") + __module__ = 'typing' - def __repr__(self): - return 'typing_extensions.TypeAlias' + def __init__(self, name, *constraints, bound=None, + covariant=False, contravariant=False, + default=None, infer_variance=False): + super().__init__(name, *constraints, bound=bound, covariant=covariant, + contravariant=contravariant) + _DefaultMixin.__init__(self, default) + self.__infer_variance__ = infer_variance - TypeAlias = _TypeAliasBase(_root=True) + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod # Python 3.10+ has PEP 612 if hasattr(typing, 'ParamSpecArgs'): ParamSpecArgs = typing.ParamSpecArgs ParamSpecKwargs = typing.ParamSpecKwargs -# 3.6-3.9 +# 3.7-3.9 else: class _Immutable: """Mixin to indicate that object should not be copied.""" @@ -1586,6 +1220,11 @@ else: def __repr__(self): return f"{self.__origin__.__name__}.args" + def __eq__(self, other): + if not isinstance(other, ParamSpecArgs): + return NotImplemented + return self.__origin__ == other.__origin__ + class ParamSpecKwargs(_Immutable): """The kwargs for a ParamSpec object. @@ -1604,14 +1243,39 @@ else: def __repr__(self): return f"{self.__origin__.__name__}.kwargs" + def __eq__(self, other): + if not isinstance(other, ParamSpecKwargs): + return NotImplemented + return self.__origin__ == other.__origin__ + # 3.10+ if hasattr(typing, 'ParamSpec'): - ParamSpec = typing.ParamSpec -# 3.6-3.9 + + # Add default Parameter - PEP 696 + class ParamSpec(typing.ParamSpec, _DefaultMixin, _root=True): + """Parameter specification variable.""" + + __module__ = 'typing' + + def __init__(self, name, *, bound=None, covariant=False, contravariant=False, + default=None): + super().__init__(name, bound=bound, covariant=covariant, + contravariant=contravariant) + _DefaultMixin.__init__(self, default) + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + +# 3.7-3.9 else: # Inherits from list as a workaround for Callable checks in Python < 3.9.2. - class ParamSpec(list): + class ParamSpec(list, _DefaultMixin): """Parameter specification variable. Usage:: @@ -1669,7 +1333,8 @@ else: def kwargs(self): return ParamSpecKwargs(self) - def __init__(self, name, *, bound=None, covariant=False, contravariant=False): + def __init__(self, name, *, bound=None, covariant=False, contravariant=False, + default=None): super().__init__([self]) self.__name__ = name self.__covariant__ = bool(covariant) @@ -1678,6 +1343,7 @@ else: self.__bound__ = typing._type_check(bound, 'Bound must be a type.') else: self.__bound__ = None + _DefaultMixin.__init__(self, default) # for pickling: try: @@ -1709,28 +1375,17 @@ else: def __call__(self, *args, **kwargs): pass - if not PEP_560: - # Only needed in 3.6. 
- def _get_type_vars(self, tvars): - if self not in tvars: - tvars.append(self) - -# 3.6-3.9 +# 3.7-3.9 if not hasattr(typing, 'Concatenate'): # Inherits from list as a workaround for Callable checks in Python < 3.9.2. class _ConcatenateGenericAlias(list): # Trick Generic into looking into this for __parameters__. - if PEP_560: - __class__ = typing._GenericAlias - else: - __class__ = typing._TypingBase + __class__ = typing._GenericAlias # Flag in 3.8. _special = False - # Attribute in 3.6 and earlier. - _gorg = typing.Generic def __init__(self, origin, args): super().__init__(args) @@ -1755,14 +1410,8 @@ if not hasattr(typing, 'Concatenate'): tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec)) ) - if not PEP_560: - # Only required in 3.6. - def _get_type_vars(self, tvars): - if self.__origin__ and self.__parameters__: - typing._get_type_vars(self.__parameters__, tvars) - -# 3.6-3.9 +# 3.7-3.9 @typing._tp_cache def _concatenate_getitem(self, parameters): if parameters == (): @@ -1797,7 +1446,7 @@ elif sys.version_info[:2] >= (3, 9): """ return _concatenate_getitem(self, parameters) # 3.7-8 -elif sys.version_info[:2] >= (3, 7): +else: class _ConcatenateForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name @@ -1817,42 +1466,6 @@ elif sys.version_info[:2] >= (3, 7): See PEP 612 for detailed information. """) -# 3.6 -else: - class _ConcatenateAliasMeta(typing.TypingMeta): - """Metaclass for Concatenate.""" - - def __repr__(self): - return 'typing_extensions.Concatenate' - - class _ConcatenateAliasBase(typing._FinalTypingBase, - metaclass=_ConcatenateAliasMeta, - _root=True): - """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a - higher order function which adds, removes or transforms parameters of a - callable. - - For example:: - - Callable[Concatenate[int, P], int] - - See PEP 612 for detailed information. - """ - __slots__ = () - - def __instancecheck__(self, obj): - raise TypeError("Concatenate cannot be used with isinstance().") - - def __subclasscheck__(self, cls): - raise TypeError("Concatenate cannot be used with issubclass().") - - def __repr__(self): - return 'typing_extensions.Concatenate' - - def __getitem__(self, parameters): - return _concatenate_getitem(self, parameters) - - Concatenate = _ConcatenateAliasBase(_root=True) # 3.10+ if hasattr(typing, 'TypeGuard'): @@ -1907,10 +1520,10 @@ elif sys.version_info[:2] >= (3, 9): ``TypeGuard`` also works with type variables. For more information, see PEP 647 (User-Defined Type Guards). """ - item = typing._type_check(parameters, f'{self} accepts only single type.') + item = typing._type_check(parameters, f'{self} accepts only a single type.') return typing._GenericAlias(self, (item,)) # 3.7-3.8 -elif sys.version_info[:2] >= (3, 7): +else: class _TypeGuardForm(typing._SpecialForm, _root=True): def __repr__(self): @@ -1965,135 +1578,78 @@ elif sys.version_info[:2] >= (3, 7): ``TypeGuard`` also works with type variables. For more information, see PEP 647 (User-Defined Type Guards). 
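+        For example (the canonical example from PEP 647)::
+
+            def is_str_list(val: List[object]) -> TypeGuard[List[str]]:
+                '''Determines whether all objects in the list are strings'''
+                return all(isinstance(x, str) for x in val)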
""") -# 3.6 + + +# Vendored from cpython typing._SpecialFrom +class _SpecialForm(typing._Final, _root=True): + __slots__ = ('_name', '__doc__', '_getitem') + + def __init__(self, getitem): + self._getitem = getitem + self._name = getitem.__name__ + self.__doc__ = getitem.__doc__ + + def __getattr__(self, item): + if item in {'__name__', '__qualname__'}: + return self._name + + raise AttributeError(item) + + def __mro_entries__(self, bases): + raise TypeError(f"Cannot subclass {self!r}") + + def __repr__(self): + return f'typing_extensions.{self._name}' + + def __reduce__(self): + return self._name + + def __call__(self, *args, **kwds): + raise TypeError(f"Cannot instantiate {self!r}") + + def __or__(self, other): + return typing.Union[self, other] + + def __ror__(self, other): + return typing.Union[other, self] + + def __instancecheck__(self, obj): + raise TypeError(f"{self} cannot be used with isinstance()") + + def __subclasscheck__(self, cls): + raise TypeError(f"{self} cannot be used with issubclass()") + + @typing._tp_cache + def __getitem__(self, parameters): + return self._getitem(self, parameters) + + +if hasattr(typing, "LiteralString"): + LiteralString = typing.LiteralString else: - class _TypeGuard(typing._FinalTypingBase, _root=True): - """Special typing form used to annotate the return type of a user-defined - type guard function. ``TypeGuard`` only accepts a single type argument. - At runtime, functions marked this way should return a boolean. + @_SpecialForm + def LiteralString(self, params): + """Represents an arbitrary literal string. - ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static - type checkers to determine a more precise type of an expression within a - program's code flow. Usually type narrowing is done by analyzing - conditional code flow and applying the narrowing to a block of code. The - conditional expression here is sometimes referred to as a "type guard". + Example:: - Sometimes it would be convenient to use a user-defined boolean function - as a type guard. Such a function should use ``TypeGuard[...]`` as its - return type to alert static type checkers to this intention. + from typing_extensions import LiteralString - Using ``-> TypeGuard`` tells the static type checker that for a given - function: + def query(sql: LiteralString) -> ...: + ... - 1. The return value is a boolean. - 2. If the return value is ``True``, the type of its argument - is the type inside ``TypeGuard``. + query("SELECT * FROM table") # ok + query(f"SELECT * FROM {input()}") # not ok - For example:: + See PEP 675 for details. - def is_str(val: Union[str, float]): - # "isinstance" type guard - if isinstance(val, str): - # Type of ``val`` is narrowed to ``str`` - ... - else: - # Else, type of ``val`` is narrowed to ``float``. - ... - - Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower - form of ``TypeA`` (it can even be a wider form) and this may lead to - type-unsafe results. The main reason is to allow for things like - narrowing ``List[object]`` to ``List[str]`` even though the latter is not - a subtype of the former, since ``List`` is invariant. The responsibility of - writing type-safe type guards is left to the user. - - ``TypeGuard`` also works with type variables. For more information, see - PEP 647 (User-Defined Type Guards). 
""" + raise TypeError(f"{self} is not subscriptable") - __slots__ = ('__type__',) - - def __init__(self, tp=None, **kwds): - self.__type__ = tp - - def __getitem__(self, item): - cls = type(self) - if self.__type__ is None: - return cls(typing._type_check(item, - f'{cls.__name__[1:]} accepts only a single type.'), - _root=True) - raise TypeError(f'{cls.__name__[1:]} cannot be further subscripted') - - def _eval_type(self, globalns, localns): - new_tp = typing._eval_type(self.__type__, globalns, localns) - if new_tp == self.__type__: - return self - return type(self)(new_tp, _root=True) - - def __repr__(self): - r = super().__repr__() - if self.__type__ is not None: - r += f'[{typing._type_repr(self.__type__)}]' - return r - - def __hash__(self): - return hash((type(self).__name__, self.__type__)) - - def __eq__(self, other): - if not isinstance(other, _TypeGuard): - return NotImplemented - if self.__type__ is not None: - return self.__type__ == other.__type__ - return self is other - - TypeGuard = _TypeGuard(_root=True) if hasattr(typing, "Self"): Self = typing.Self -elif sys.version_info[:2] >= (3, 7): - # Vendored from cpython typing._SpecialFrom - class _SpecialForm(typing._Final, _root=True): - __slots__ = ('_name', '__doc__', '_getitem') - - def __init__(self, getitem): - self._getitem = getitem - self._name = getitem.__name__ - self.__doc__ = getitem.__doc__ - - def __getattr__(self, item): - if item in {'__name__', '__qualname__'}: - return self._name - - raise AttributeError(item) - - def __mro_entries__(self, bases): - raise TypeError(f"Cannot subclass {self!r}") - - def __repr__(self): - return f'typing_extensions.{self._name}' - - def __reduce__(self): - return self._name - - def __call__(self, *args, **kwds): - raise TypeError(f"Cannot instantiate {self!r}") - - def __or__(self, other): - return typing.Union[self, other] - - def __ror__(self, other): - return typing.Union[other, self] - - def __instancecheck__(self, obj): - raise TypeError(f"{self} cannot be used with isinstance()") - - def __subclasscheck__(self, cls): - raise TypeError(f"{self} cannot be used with issubclass()") - - @typing._tp_cache - def __getitem__(self, parameters): - return self._getitem(self, parameters) - +else: @_SpecialForm def Self(self, params): """Used to spell the type of "self" in classes. @@ -2110,30 +1666,36 @@ elif sys.version_info[:2] >= (3, 7): """ raise TypeError(f"{self} is not subscriptable") + + +if hasattr(typing, "Never"): + Never = typing.Never else: - class _Self(typing._FinalTypingBase, _root=True): - """Used to spell the type of "self" in classes. + @_SpecialForm + def Never(self, params): + """The bottom type, a type that has no members. - Example:: + This can be used to define a function that should never be + called, or a function that never returns:: - from typing import Self + from typing_extensions import Never - class ReturnsSelf: - def parse(self, data: bytes) -> Self: - ... 
- return self + def never_call_me(arg: Never) -> None: + pass + + def int_or_str(arg: int | str) -> None: + never_call_me(arg) # type checker error + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _: + never_call_me(arg) # ok, arg is of type Never """ - __slots__ = () - - def __instancecheck__(self, obj): - raise TypeError(f"{self} cannot be used with isinstance().") - - def __subclasscheck__(self, cls): - raise TypeError(f"{self} cannot be used with issubclass().") - - Self = _Self(_root=True) + raise TypeError(f"{self} is not subscriptable") if hasattr(typing, 'Required'): @@ -2161,7 +1723,7 @@ elif sys.version_info[:2] >= (3, 9): There is no runtime checking that a required key is actually provided when instantiating a related TypedDict. """ - item = typing._type_check(parameters, f'{self._name} accepts only single type') + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') return typing._GenericAlias(self, (item,)) @_ExtensionsSpecialForm @@ -2178,17 +1740,17 @@ elif sys.version_info[:2] >= (3, 9): year=1999, ) """ - item = typing._type_check(parameters, f'{self._name} accepts only single type') + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') return typing._GenericAlias(self, (item,)) -elif sys.version_info[:2] >= (3, 7): +else: class _RequiredForm(typing._SpecialForm, _root=True): def __repr__(self): return 'typing_extensions.' + self._name def __getitem__(self, parameters): item = typing._type_check(parameters, - '{} accepts only single type'.format(self._name)) + f'{self._name} accepts only a single type.') return typing._GenericAlias(self, (item,)) Required = _RequiredForm( @@ -2222,75 +1784,426 @@ elif sys.version_info[:2] >= (3, 7): year=1999, ) """) + + +if hasattr(typing, "Unpack"): # 3.11+ + Unpack = typing.Unpack +elif sys.version_info[:2] >= (3, 9): + class _UnpackSpecialForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + class _UnpackAlias(typing._GenericAlias, _root=True): + __class__ = typing.TypeVar + + @_UnpackSpecialForm + def Unpack(self, parameters): + """A special typing construct to unpack a variadic type. For example: + + Shape = TypeVarTuple('Shape') + Batch = NewType('Batch', int) + + def add_batch_axis( + x: Array[Unpack[Shape]] + ) -> Array[Batch, Unpack[Shape]]: ... + + """ + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') + return _UnpackAlias(self, (item,)) + + def _is_unpack(obj): + return isinstance(obj, _UnpackAlias) + else: - # NOTE: Modeled after _Final's implementation when _FinalTypingBase available - class _MaybeRequired(typing._FinalTypingBase, _root=True): - __slots__ = ('__type__',) + class _UnpackAlias(typing._GenericAlias, _root=True): + __class__ = typing.TypeVar - def __init__(self, tp=None, **kwds): - self.__type__ = tp + class _UnpackForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' 
+ self._name
 
-        def __getitem__(self, item):
-            cls = type(self)
-            if self.__type__ is None:
-                return cls(typing._type_check(item,
-                           '{} accepts only single type.'.format(cls.__name__[1:])),
-                           _root=True)
-            raise TypeError('{} cannot be further subscripted'
-                            .format(cls.__name__[1:]))
+        def __getitem__(self, parameters):
+            item = typing._type_check(parameters,
+                                      f'{self._name} accepts only a single type.')
+            return _UnpackAlias(self, (item,))
 
-        def _eval_type(self, globalns, localns):
-            new_tp = typing._eval_type(self.__type__, globalns, localns)
-            if new_tp == self.__type__:
-                return self
-            return type(self)(new_tp, _root=True)
+    Unpack = _UnpackForm(
+        'Unpack',
+        doc="""A special typing construct to unpack a variadic type. For example:
+
+            Shape = TypeVarTuple('Shape')
+            Batch = NewType('Batch', int)
+
+            def add_batch_axis(
+                x: Array[Unpack[Shape]]
+            ) -> Array[Batch, Unpack[Shape]]: ...
+
+        """)
+
+    def _is_unpack(obj):
+        return isinstance(obj, _UnpackAlias)
+
+
+if hasattr(typing, "TypeVarTuple"):  # 3.11+
+
+    # Add default Parameter - PEP 696
+    class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True):
+        """Type variable tuple."""
+
+        def __init__(self, name, *, default=None):
+            super().__init__(name)
+            _DefaultMixin.__init__(self, default)
+
+            # for pickling:
+            try:
+                def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+            except (AttributeError, ValueError):
+                def_mod = None
+            if def_mod != 'typing_extensions':
+                self.__module__ = def_mod
+
+else:
+    class TypeVarTuple(_DefaultMixin):
+        """Type variable tuple.
+
+        Usage::
+
+            Ts = TypeVarTuple('Ts')
+
+        In the same way that a normal type variable is a stand-in for a single
+        type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
+        type such as ``Tuple[int, str]``.
+
+        Type variable tuples can be used in ``Generic`` declarations.
+        Consider the following example::
+
+            class Array(Generic[*Ts]): ...
+
+        The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
+        where ``T1`` and ``T2`` are type variables. To use these type variables
+        as type parameters of ``Array``, we must *unpack* the type variable tuple using
+        the star operator: ``*Ts``. The signature of ``Array`` then behaves
+        as if we had simply written ``class Array(Generic[T1, T2]): ...``.
+        In contrast to ``Generic[T1, T2]``, however, ``Generic[*Ts]`` allows
+        us to parameterise the class with an *arbitrary* number of type parameters.
+
+        Type variable tuples can be used anywhere a normal ``TypeVar`` can.
+        This includes class definitions, as shown above, as well as function
+        signatures and variable annotations::
+
+            class Array(Generic[*Ts]):
+
+                def __init__(self, shape: Tuple[*Ts]):
+                    self._shape: Tuple[*Ts] = shape
+
+                def get_shape(self) -> Tuple[*Ts]:
+                    return self._shape
+
+            shape = (Height(480), Width(640))
+            x: Array[Height, Width] = Array(shape)
+            y = abs(x) # Inferred type is Array[Height, Width]
+            z = x + x # ... is Array[Height, Width]
+            x.get_shape() # ... is tuple[Height, Width]
+
+        """
+
+        # Trick Generic into looking here for __parameters__.
+ __class__ = typing.TypeVar + + def __iter__(self): + yield self.__unpacked__ + + def __init__(self, name, *, default=None): + self.__name__ = name + _DefaultMixin.__init__(self, default) + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + + self.__unpacked__ = Unpack[self] def __repr__(self): - r = super().__repr__() - if self.__type__ is not None: - r += '[{}]'.format(typing._type_repr(self.__type__)) - return r + return self.__name__ def __hash__(self): - return hash((type(self).__name__, self.__type__)) + return object.__hash__(self) def __eq__(self, other): - if not isinstance(other, type(self)): - return NotImplemented - if self.__type__ is not None: - return self.__type__ == other.__type__ return self is other - class _Required(_MaybeRequired, _root=True): - """A special typing construct to mark a key of a total=False TypedDict - as required. For example: + def __reduce__(self): + return self.__name__ - class Movie(TypedDict, total=False): - title: Required[str] - year: int + def __init_subclass__(self, *args, **kwds): + if '_root' not in kwds: + raise TypeError("Cannot subclass special typing classes") - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) - There is no runtime checking that a required key is actually provided - when instantiating a related TypedDict. +if hasattr(typing, "reveal_type"): + reveal_type = typing.reveal_type +else: + def reveal_type(__obj: T) -> T: + """Reveal the inferred type of a variable. + + When a static type checker encounters a call to ``reveal_type()``, + it will emit the inferred type of the argument:: + + x: int = 1 + reveal_type(x) + + Running a static type checker (e.g., ``mypy``) on this example + will produce output similar to 'Revealed type is "builtins.int"'. + + At runtime, the function prints the runtime type of the + argument and returns it unchanged. + """ + print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr) + return __obj - class _NotRequired(_MaybeRequired, _root=True): - """A special typing construct to mark a key of a TypedDict as - potentially missing. For example: - class Movie(TypedDict): - title: str - year: NotRequired[int] +if hasattr(typing, "assert_never"): + assert_never = typing.assert_never +else: + def assert_never(__arg: Never) -> Never: + """Assert to the type checker that a line of code is unreachable. + + Example:: + + def int_or_str(arg: int | str) -> None: + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _: + assert_never(arg) + + If a type checker finds that a call to assert_never() is + reachable, it will emit an error. + + At runtime, this throws an exception when called. - m = Movie( - title='The Matrix', # typechecker error if key is omitted - year=1999, - ) """ + raise AssertionError("Expected code to be unreachable") - Required = _Required(_root=True) - NotRequired = _NotRequired(_root=True) + +if hasattr(typing, 'dataclass_transform'): + dataclass_transform = typing.dataclass_transform +else: + def dataclass_transform( + *, + eq_default: bool = True, + order_default: bool = False, + kw_only_default: bool = False, + field_specifiers: typing.Tuple[ + typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]], + ... 
+        ] = (),
+        **kwargs: typing.Any,
+    ) -> typing.Callable[[T], T]:
+        """Decorator that marks a function, class, or metaclass as providing
+        dataclass-like behavior.
+
+        Example:
+
+            from typing_extensions import dataclass_transform
+
+            _T = TypeVar("_T")
+
+            # Used on a decorator function
+            @dataclass_transform()
+            def create_model(cls: type[_T]) -> type[_T]:
+                ...
+                return cls
+
+            @create_model
+            class CustomerModel:
+                id: int
+                name: str
+
+            # Used on a base class
+            @dataclass_transform()
+            class ModelBase: ...
+
+            class CustomerModel(ModelBase):
+                id: int
+                name: str
+
+            # Used on a metaclass
+            @dataclass_transform()
+            class ModelMeta(type): ...
+
+            class ModelBase(metaclass=ModelMeta): ...
+
+            class CustomerModel(ModelBase):
+                id: int
+                name: str
+
+        Each of the ``CustomerModel`` classes defined in this example will now
+        behave similarly to a dataclass created with the ``@dataclasses.dataclass``
+        decorator. For example, the type checker will synthesize an ``__init__``
+        method.
+
+        The arguments to this decorator can be used to customize this behavior:
+        - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
+          True or False if it is omitted by the caller.
+        - ``order_default`` indicates whether the ``order`` parameter is
+          assumed to be True or False if it is omitted by the caller.
+        - ``kw_only_default`` indicates whether the ``kw_only`` parameter is
+          assumed to be True or False if it is omitted by the caller.
+        - ``field_specifiers`` specifies a static list of supported classes
+          or functions that describe fields, similar to ``dataclasses.field()``.
+
+        At runtime, this decorator records its arguments in the
+        ``__dataclass_transform__`` attribute on the decorated object.
+
+        See PEP 681 for details.
+
+        """
+        def decorator(cls_or_fn):
+            cls_or_fn.__dataclass_transform__ = {
+                "eq_default": eq_default,
+                "order_default": order_default,
+                "kw_only_default": kw_only_default,
+                "field_specifiers": field_specifiers,
+                "kwargs": kwargs,
+            }
+            return cls_or_fn
+        return decorator
+
+
+if hasattr(typing, "override"):
+    override = typing.override
+else:
+    _F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
+
+    def override(__arg: _F) -> _F:
+        """Indicate that a method is intended to override a method in a base class.
+
+        Usage:
+
+            class Base:
+                def method(self) -> None: ...
+
+            class Child(Base):
+                @override
+                def method(self) -> None:
+                    super().method()
+
+        When this decorator is applied to a method, the type checker will
+        validate that it overrides a method with the same name on a base class.
+        This helps prevent bugs that may occur when a base class is changed
+        without an equivalent change to a child class.
+
+        See PEP 698 for details.
+
+        """
+        return __arg
+
+
+# We have to do some monkey patching to deal with the dual nature of
+# Unpack/TypeVarTuple:
+# - We want Unpack to be a kind of TypeVar so it gets accepted in
+#   Generic[Unpack[Ts]]
+# - We want it to *not* be treated as a TypeVar for the purposes of
+#   counting generic parameters, so that when we subscript a generic,
+#   the runtime doesn't try to substitute the Unpack with the subscripted type.
+if not hasattr(typing, "TypeVarTuple"):
+    typing._collect_type_vars = _collect_type_vars
+    typing._check_generic = _check_generic
+
+
+# Backport typing.NamedTuple as it exists in Python 3.11.
+# In 3.11, support for defining generic `NamedTuple`s was added.
+# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8.
+if sys.version_info >= (3, 11): + NamedTuple = typing.NamedTuple +else: + def _caller(): + try: + return sys._getframe(2).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): # For platforms without _getframe() + return None + + def _make_nmtuple(name, types, module, defaults=()): + fields = [n for n, t in types] + annotations = {n: typing._type_check(t, f"field {n} annotation must be a type") + for n, t in types} + nm_tpl = collections.namedtuple(name, fields, + defaults=defaults, module=module) + nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations + # The `_field_types` attribute was removed in 3.9; + # in earlier versions, it is the same as the `__annotations__` attribute + if sys.version_info < (3, 9): + nm_tpl._field_types = annotations + return nm_tpl + + _prohibited_namedtuple_fields = typing._prohibited + _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'}) + + class _NamedTupleMeta(type): + def __new__(cls, typename, bases, ns): + assert _NamedTuple in bases + for base in bases: + if base is not _NamedTuple and base is not typing.Generic: + raise TypeError( + 'can only inherit from a NamedTuple type and Generic') + bases = tuple(tuple if base is _NamedTuple else base for base in bases) + types = ns.get('__annotations__', {}) + default_names = [] + for field_name in types: + if field_name in ns: + default_names.append(field_name) + elif default_names: + raise TypeError(f"Non-default namedtuple field {field_name} " + f"cannot follow default field" + f"{'s' if len(default_names) > 1 else ''} " + f"{', '.join(default_names)}") + nm_tpl = _make_nmtuple( + typename, types.items(), + defaults=[ns[n] for n in default_names], + module=ns['__module__'] + ) + nm_tpl.__bases__ = bases + if typing.Generic in bases: + class_getitem = typing.Generic.__class_getitem__.__func__ + nm_tpl.__class_getitem__ = classmethod(class_getitem) + # update from user namespace without overriding special namedtuple attributes + for key in ns: + if key in _prohibited_namedtuple_fields: + raise AttributeError("Cannot overwrite NamedTuple attribute " + key) + elif key not in _special_namedtuple_fields and key not in nm_tpl._fields: + setattr(nm_tpl, key, ns[key]) + if typing.Generic in bases: + nm_tpl.__init_subclass__() + return nm_tpl + + def NamedTuple(__typename, __fields=None, **kwargs): + if __fields is None: + __fields = kwargs.items() + elif kwargs: + raise TypeError("Either list of fields or keywords" + " can be provided to NamedTuple, not both") + return _make_nmtuple(__typename, __fields, module=_caller()) + + NamedTuple.__doc__ = typing.NamedTuple.__doc__ + _NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {}) + + # On 3.8+, alter the signature so that it matches typing.NamedTuple. + # The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7, + # so just leave the signature as it is on 3.7. + if sys.version_info >= (3, 8): + NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)' + + def _namedtuple_mro_entries(bases): + assert NamedTuple in bases + return (_NamedTuple,) + + NamedTuple.__mro_entries__ = _namedtuple_mro_entries diff --git a/requirements.txt b/requirements.txt index a0dcd531..773e6f01 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ beautifulsoup4==4.11.1 bleach==5.0.1 certifi==2022.9.24 cheroot==8.6.0 -cherrypy==18.6.1 +cherrypy==18.8.0 cloudinary==1.29.0 distro==1.7.0 dnspython==2.2.1
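The backported `TypeVar`, `ParamSpec`, and `TypeVarTuple` vendored above accept a `default` argument per PEP 696 (plus `infer_variance` from PEP 695 on `TypeVar`). A minimal sketch of the intended use; the `Box` class and variable names are illustrative only, and only a PEP 696-aware type checker acts on the default:

    from typing import Generic, Optional
    from typing_extensions import TypeVar

    # The default is stored on the TypeVar at runtime; a PEP 696-aware
    # checker substitutes it wherever the parameter is left unspecified.
    T = TypeVar("T", default=int)

    class Box(Generic[T]):
        def __init__(self, item: Optional[T] = None) -> None:
            self.item = item

    box = Box()  # checkers implementing PEP 696 infer Box[int]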
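`Required` and `NotRequired` (PEP 655) mark individual `TypedDict` keys as mandatory or optional independently of the class-wide `total` flag. A short sketch, assuming the vendored `TypedDict` is used alongside them; `Movie` is illustrative:

    from typing_extensions import Required, TypedDict

    class Movie(TypedDict, total=False):
        title: Required[str]  # required even though total=False
        year: int             # optional, because total=False

    m: Movie = {"title": "The Matrix"}  # ok: "year" may be omitted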
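`Never` and `assert_never` together give exhaustiveness checking that also fails loudly at runtime. The docstrings above use `match`, which needs 3.10+; an equivalent `isinstance` sketch runs on every version this backport targets:

    from typing import Union
    from typing_extensions import assert_never

    def describe(arg: Union[int, str]) -> str:
        if isinstance(arg, int):
            return "an int"
        elif isinstance(arg, str):
            return "a str"
        else:
            # arg is narrowed to Never here; a checker errors if a branch
            # above is missing, and at runtime this raises AssertionError.
            assert_never(arg)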
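At runtime, `dataclass_transform` only records its arguments on the decorated object; the synthesized `__init__` and friends exist purely in the type checker's model. A sketch of that runtime contract, with `create_model` as a stand-in factory:

    from typing_extensions import dataclass_transform

    @dataclass_transform(kw_only_default=True)
    def create_model(cls):
        return cls  # a real factory would generate __init__, __eq__, ...

    # The decorator stored its configuration for later introspection:
    assert create_model.__dataclass_transform__["kw_only_default"] is True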
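`override` is likewise a runtime no-op (it returns its argument unchanged); its value is the checker-side validation described in PEP 698. A minimal sketch:

    from typing_extensions import override

    class Base:
        def run(self) -> None: ...

    class Child(Base):
        @override  # a checker errors if Base.run is renamed or removed
        def run(self) -> None:
            super().run()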
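The `NamedTuple` backport's main gain is generic named tuples on 3.7-3.10, which the stdlib either rejected outright or only half-supported. A sketch, assuming only the vendored module; `Pair` is illustrative:

    from typing import Generic, TypeVar
    from typing_extensions import NamedTuple

    T = TypeVar("T")

    class Pair(NamedTuple, Generic[T]):
        first: T
        second: T

    p = Pair(1, 2)  # a checker sees Pair[int]; it is still a real tuple
    assert p._fields == ("first", "second")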