Mirror of https://github.com/Tautulli/Tautulli.git (synced 2025-07-07 05:31:15 -07:00)
Bump pyparsing from 3.1.1 to 3.1.2 (#2296)
* Bump pyparsing from 3.1.1 to 3.1.2

Bumps [pyparsing](https://github.com/pyparsing/pyparsing) from 3.1.1 to 3.1.2.
- [Release notes](https://github.com/pyparsing/pyparsing/releases)
- [Changelog](https://github.com/pyparsing/pyparsing/blob/master/CHANGES)
- [Commits](https://github.com/pyparsing/pyparsing/compare/3.1.1...pyparsing_3.1.2)

---
updated-dependencies:
- dependency-name: pyparsing
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update pyparsing==3.1.2

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: JonnyWong16 <9099342+JonnyWong16@users.noreply.github.com>

[skip ci]
This commit is contained in:
parent 1d96e0f859
commit 26358427ce
12 changed files with 610 additions and 706 deletions
@@ -120,8 +120,8 @@ class version_info(NamedTuple):
        return f"{__name__}.{type(self).__name__}({', '.join('{}={!r}'.format(*nv) for nv in zip(self._fields, self))})"


-__version_info__ = version_info(3, 1, 1, "final", 1)
-__version_time__ = "29 Jul 2023 22:27 UTC"
+__version_info__ = version_info(3, 1, 2, "final", 1)
+__version_time__ = "06 Mar 2024 07:08 UTC"
__version__ = __version_info__.__version__
__versionTime__ = __version_time__
__author__ = "Paul McGuire <ptmcg.gm+pyparsing@gmail.com>"
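For reference, after this bump the bundled module reports the new version through the attributes touched above. A minimal check (a sketch; assumes the updated pyparsing is importable):

# Confirm the bundled pyparsing actually reports 3.1.2 (illustrative check).
import pyparsing

print(pyparsing.__version__)        # expected: "3.1.2"
print(pyparsing.__version_info__)   # expected to report major=3, minor=1, micro=2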
@@ -111,7 +111,6 @@ def with_attribute(*args, **attr_dict):
            <div type="graph">1,3 2,3 1,1</div>
            <div>this has no type</div>
            </div>

        '''
        div,div_end = make_html_tags("div")
@@ -199,19 +198,9 @@ def with_class(classname, namespace=""):

# pre-PEP8 compatibility symbols
# fmt: off
-@replaced_by_pep8(replace_with)
-def replaceWith(): ...
-
-@replaced_by_pep8(remove_quotes)
-def removeQuotes(): ...
-
-@replaced_by_pep8(with_attribute)
-def withAttribute(): ...
-
-@replaced_by_pep8(with_class)
-def withClass(): ...
-
-@replaced_by_pep8(match_only_at_col)
-def matchOnlyAtCol(): ...
-
+replaceWith = replaced_by_pep8("replaceWith", replace_with)
+removeQuotes = replaced_by_pep8("removeQuotes", remove_quotes)
+withAttribute = replaced_by_pep8("withAttribute", with_attribute)
+withClass = replaced_by_pep8("withClass", with_class)
+matchOnlyAtCol = replaced_by_pep8("matchOnlyAtCol", match_only_at_col)
# fmt: on
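The behavior of these compatibility names is unchanged: each camelCase symbol is still a callable alias for its snake_case replacement, now created by calling replaced_by_pep8 directly rather than through a decorator. A small sanity check (a sketch; assumes pyparsing 3.1.2 is installed):

# The legacy camelCase alias still dispatches to the PEP 8 style implementation.
import pyparsing as pp

na = pp.Literal("N/A").set_parse_action(pp.replaceWith("not available"))
print(na.parse_string("N/A"))  # -> ['not available'], same result as with pp.replace_with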
@@ -206,7 +206,7 @@ class pyparsing_common:
    scientific notation and returns a float"""

    # streamlining this expression makes the docs nicer-looking
-    number = (sci_real | real | signed_integer).setName("number").streamline()
+    number = (sci_real | real | signed_integer).set_name("number").streamline()
    """any numeric expression, returns the corresponding Python type"""

    fnumber = (
@@ -216,6 +216,13 @@ class pyparsing_common:
    )
    """any int or real number, returned as float"""

+    ieee_float = (
+        Regex(r"(?i)[+-]?((\d+\.?\d*(e[+-]?\d+)?)|nan|inf(inity)?)")
+        .set_name("ieee_float")
+        .set_parse_action(convert_to_float)
+    )
+    """any floating-point literal (int, real number, infinity, or NaN), returned as float"""
+
    identifier = Word(identchars, identbodychars).set_name("identifier")
    """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
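The new ieee_float expression shown above accepts plain and scientific-notation floats plus inf/infinity/nan spellings (case-insensitive) and converts each match to a Python float. A quick illustration (a sketch; requires pyparsing 3.1.2):

# Exercise the ieee_float expression added in this hunk.
from pyparsing import pyparsing_common as ppc

for text in ["3.14159", "6.02e23", "-inf", "NaN"]:
    value = ppc.ieee_float.parse_string(text)[0]
    print(f"{text!r} -> {value!r}")  # every literal comes back as a Python float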
File diff suppressed because it is too large
@@ -473,7 +473,7 @@ def _to_diagram_element(
    :param show_groups: bool flag indicating whether to show groups using bounding box
    """
    exprs = element.recurse()
-    name = name_hint or element.customName or element.__class__.__name__
+    name = name_hint or element.customName or type(element).__name__

    # Python's id() is used to provide a unique identifier for elements
    el_id = id(element)
@@ -14,11 +14,13 @@ from .util import (
from .unicode import pyparsing_unicode as ppu


-class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic):
+class _ExceptionWordUnicodeSet(
+    ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic
+):
    pass


-_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums)
+_extract_alphanums = _collapse_string_to_ranges(_ExceptionWordUnicodeSet.alphanums)
_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.")
@@ -86,26 +88,24 @@ class ParseBaseException(Exception):
        ret.append(" " * (exc.column - 1) + "^")
        ret.append(f"{type(exc).__name__}: {exc}")

-        if depth > 0:
+        if depth <= 0:
+            return "\n".join(ret)

        callers = inspect.getinnerframes(exc.__traceback__, context=depth)
        seen = set()
-        for i, ff in enumerate(callers[-depth:]):
+        for ff in callers[-depth:]:
            frm = ff[0]

            f_self = frm.f_locals.get("self", None)
            if isinstance(f_self, ParserElement):
-                if not frm.f_code.co_name.startswith(
-                    ("parseImpl", "_parseNoCache")
-                ):
+                if not frm.f_code.co_name.startswith(("parseImpl", "_parseNoCache")):
                    continue
                if id(f_self) in seen:
                    continue
                seen.add(id(f_self))

                self_type = type(f_self)
-                ret.append(
-                    f"{self_type.__module__}.{self_type.__name__} - {f_self}"
-                )
+                ret.append(f"{self_type.__module__}.{self_type.__name__} - {f_self}")

            elif f_self is not None:
                self_type = type(f_self)
@@ -220,8 +220,10 @@ class ParseBaseException(Exception):

        Example::

            # an expression to parse 3 integers
            expr = pp.Word(pp.nums) * 3
            try:
                # a failing parse - the third integer is prefixed with "A"
                expr.parse_string("123 456 A789")
            except pp.ParseException as pe:
                print(pe.explain(depth=0))
@@ -244,8 +246,7 @@ class ParseBaseException(Exception):
        return self.explain_exception(self, depth)

    # fmt: off
-    @replaced_by_pep8(mark_input_line)
-    def markInputline(self): ...
+    markInputline = replaced_by_pep8("markInputline", mark_input_line)
    # fmt: on
@@ -255,16 +256,16 @@ class ParseException(ParseBaseException):

    Example::

+        integer = Word(nums).set_name("integer")
        try:
-            Word(nums).set_name("integer").parse_string("ABC")
+            integer.parse_string("ABC")
        except ParseException as pe:
            print(pe)
-            print("column: {}".format(pe.column))
+            print(f"column: {pe.column}")

    prints::

-        Expected integer (at char 0), (line:1, col:1)
-        column: 1
+        Expected integer (at char 0), (line:1, col:1) column: 1

    """
@@ -74,7 +74,7 @@ def counted_array(
        intExpr = intExpr.copy()
    intExpr.set_name("arrayLen")
    intExpr.add_parse_action(count_field_parse_action, call_during_try=True)
-    return (intExpr + array_expr).set_name("(len) " + str(expr) + "...")
+    return (intExpr + array_expr).set_name(f"(len) {expr}...")


def match_previous_literal(expr: ParserElement) -> ParserElement:
@@ -95,15 +95,17 @@ def match_previous_literal(expr: ParserElement) -> ParserElement:
    rep = Forward()

    def copy_token_to_repeater(s, l, t):
-        if t:
+        if not t:
+            rep << Empty()
+            return
+
        if len(t) == 1:
            rep << t[0]
-        else:
+            return
+
        # flatten t tokens
        tflat = _flatten(t.as_list())
        rep << And(Literal(tt) for tt in tflat)
-        else:
-            rep << Empty()

    expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
    rep.set_name("(prev) " + str(expr))
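As a reminder of what this helper does (unchanged by the guard-clause refactor): the Forward named rep is re-bound on every match so that the same literal text must appear again later in the input. A brief usage sketch (assumes pyparsing is installed):

# match_previous_literal re-matches whatever text the first expression produced.
import pyparsing as pp

first = pp.Word(pp.nums)
same = pp.match_previous_literal(first)
pair = first + ":" + same

print(pair.parse_string("1:1"))  # -> ['1', ':', '1']
# pair.parse_string("1:2") raises ParseException, because '2' != '1'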
@@ -230,7 +232,7 @@ def one_of(
                if isequal(other, cur):
                    del symbols[i + j + 1]
                    break
-                elif masks(cur, other):
+                if masks(cur, other):
                    del symbols[i + j + 1]
                    symbols.insert(i, other)
                    break
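This loop de-duplicates and reorders the candidate symbols so that longer alternatives are tried before the shorter strings that mask them; switching elif to if does not change that, since the preceding branch breaks. Typical usage (a sketch; assumes pyparsing is installed):

# one_of builds one optimized expression for a set of alternatives,
# making sure "<=" is matched before its prefix "<".
import pyparsing as pp

comparison_op = pp.one_of("< <= > >= = !=")
print(comparison_op.parse_string("<="))  # -> ['<='], not ['<']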
@@ -534,7 +536,9 @@ def nested_expr(
        )
    else:
        ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
-    ret.set_name("nested %s%s expression" % (opener, closer))
+    ret.set_name(f"nested {opener}{closer} expression")
+    # don't override error message from content expressions
+    ret.errmsg = None
    return ret
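nested_expr builds a recursive expression for matched, arbitrarily nested delimiters; per its comment, the errmsg = None line avoids overriding error messages coming from the content expressions. A short usage sketch (assumes pyparsing is installed):

# nested_expr parses nested parentheses into nested Python lists.
import pyparsing as pp

expr = pp.nested_expr("(", ")")
print(expr.parse_string("(a (b (c d)) e)").as_list())
# -> [['a', ['b', ['c', 'd']], 'e']]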
@@ -580,7 +584,7 @@ def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">"))
    )
    closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)

-    openTag.set_name("<%s>" % resname)
+    openTag.set_name(f"<{resname}>")
    # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
    openTag.add_parse_action(
        lambda t: t.__setitem__(
@@ -589,7 +593,7 @@ def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">"))
    )
    closeTag = closeTag(
        "end" + "".join(resname.replace(":", " ").title().split())
-    ).set_name("</%s>" % resname)
+    ).set_name(f"</{resname}>")
    openTag.tag = resname
    closeTag.tag = resname
    openTag.tag_body = SkipTo(closeTag())
@@ -777,7 +781,7 @@ def infix_notation(
        rpar = Suppress(rpar)

    # if lpar and rpar are not suppressed, wrap in group
-    if not (isinstance(rpar, Suppress) and isinstance(rpar, Suppress)):
+    if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)):
        lastExpr = base_expr | Group(lpar + ret + rpar)
    else:
        lastExpr = base_expr | (lpar + ret + rpar)
@@ -787,7 +791,7 @@ def infix_notation(
    pa: typing.Optional[ParseAction]
    opExpr1: ParserElement
    opExpr2: ParserElement
-    for i, operDef in enumerate(op_list):
+    for operDef in op_list:
        opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]  # type: ignore[assignment]
        if isinstance(opExpr, str_type):
            opExpr = ParserElement._literalStringClass(opExpr)
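The first of these two infix_notation hunks fixes a check that previously tested rpar twice instead of lpar and rpar, so non-Suppress parenthesis expressions are grouped as intended; the second simply drops an unused enumerate index. Basic usage for reference (a sketch; assumes pyparsing is installed):

# infix_notation builds a precedence-climbing parser from an operand and an operator table.
import pyparsing as pp

integer = pp.pyparsing_common.integer
arith = pp.infix_notation(
    integer,
    [
        (pp.one_of("* /"), 2, pp.OpAssoc.LEFT),
        (pp.one_of("+ -"), 2, pp.OpAssoc.LEFT),
    ],
)
print(arith.parse_string("1 + 2 * (3 - 4)").as_list())
# -> [[1, '+', [2, '*', [3, '-', 4]]]]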
@@ -1058,43 +1062,17 @@ dblSlashComment = dbl_slash_comment
cppStyleComment = cpp_style_comment
javaStyleComment = java_style_comment
pythonStyleComment = python_style_comment

-@replaced_by_pep8(DelimitedList)
-def delimitedList(): ...
-
-@replaced_by_pep8(DelimitedList)
-def delimited_list(): ...
-
-@replaced_by_pep8(counted_array)
-def countedArray(): ...
-
-@replaced_by_pep8(match_previous_literal)
-def matchPreviousLiteral(): ...
-
-@replaced_by_pep8(match_previous_expr)
-def matchPreviousExpr(): ...
-
-@replaced_by_pep8(one_of)
-def oneOf(): ...
-
-@replaced_by_pep8(dict_of)
-def dictOf(): ...
-
-@replaced_by_pep8(original_text_for)
-def originalTextFor(): ...
-
-@replaced_by_pep8(nested_expr)
-def nestedExpr(): ...
-
-@replaced_by_pep8(make_html_tags)
-def makeHTMLTags(): ...
-
-@replaced_by_pep8(make_xml_tags)
-def makeXMLTags(): ...
-
-@replaced_by_pep8(replace_html_entity)
-def replaceHTMLEntity(): ...
-
-@replaced_by_pep8(infix_notation)
-def infixNotation(): ...
+delimitedList = replaced_by_pep8("delimitedList", DelimitedList)
+delimited_list = replaced_by_pep8("delimited_list", DelimitedList)
+countedArray = replaced_by_pep8("countedArray", counted_array)
+matchPreviousLiteral = replaced_by_pep8("matchPreviousLiteral", match_previous_literal)
+matchPreviousExpr = replaced_by_pep8("matchPreviousExpr", match_previous_expr)
+oneOf = replaced_by_pep8("oneOf", one_of)
+dictOf = replaced_by_pep8("dictOf", dict_of)
+originalTextFor = replaced_by_pep8("originalTextFor", original_text_for)
+nestedExpr = replaced_by_pep8("nestedExpr", nested_expr)
+makeHTMLTags = replaced_by_pep8("makeHTMLTags", make_html_tags)
+makeXMLTags = replaced_by_pep8("makeXMLTags", make_xml_tags)
+replaceHTMLEntity = replaced_by_pep8("replaceHTMLEntity", replace_html_entity)
+infixNotation = replaced_by_pep8("infixNotation", infix_notation)
# fmt: on
@@ -173,26 +173,32 @@ class ParseResults:
    ):
        self._tokdict: Dict[str, _ParseResultsWithOffset]
        self._modal = modal

-        if name is not None and name != "":
+        if name is None or name == "":
+            return
+
        if isinstance(name, int):
            name = str(name)

        if not modal:
            self._all_names = {name}

        self._name = name

-        if toklist not in self._null_values:
+        if toklist in self._null_values:
+            return
+
        if isinstance(toklist, (str_type, type)):
            toklist = [toklist]

        if asList:
            if isinstance(toklist, ParseResults):
-                self[name] = _ParseResultsWithOffset(
-                    ParseResults(toklist._toklist), 0
-                )
+                self[name] = _ParseResultsWithOffset(ParseResults(toklist._toklist), 0)
            else:
-                self[name] = _ParseResultsWithOffset(
-                    ParseResults(toklist[0]), 0
-                )
+                self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0)
            self[name]._name = name
-        else:
+            return
+
        try:
            self[name] = toklist[0]
        except (KeyError, TypeError, IndexError):
@@ -204,10 +210,10 @@ class ParseResults:
    def __getitem__(self, i):
        if isinstance(i, (int, slice)):
            return self._toklist[i]
-        else:
+
        if i not in self._all_names:
            return self._tokdict[i][-1][0]
-        else:
+
        return ParseResults([v[0] for v in self._tokdict[i]])

    def __setitem__(self, k, v, isinstance=isinstance):
@@ -226,7 +232,10 @@ class ParseResults:
        sub._parent = self

    def __delitem__(self, i):
-        if isinstance(i, (int, slice)):
+        if not isinstance(i, (int, slice)):
+            del self._tokdict[i]
+            return
+
        mylen = len(self._toklist)
        del self._toklist[i]
@@ -239,14 +248,12 @@ class ParseResults:
            removed = list(range(*i.indices(mylen)))
            removed.reverse()
            # fixup indices in token dictionary
-            for name, occurrences in self._tokdict.items():
+            for occurrences in self._tokdict.values():
                for j in removed:
                    for k, (value, position) in enumerate(occurrences):
                        occurrences[k] = _ParseResultsWithOffset(
                            value, position - (position > j)
                        )
-        else:
-            del self._tokdict[i]

    def __contains__(self, k) -> bool:
        return k in self._tokdict
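These __getitem__ and __delitem__ hunks only restructure the existing int-or-slice versus named-result branches into guard clauses; behavior is unchanged. A reminder of the two access paths they serve (a sketch; assumes pyparsing is installed):

# ParseResults supports positional and named access on the same object.
import pyparsing as pp

date = pp.Word(pp.nums)("year") + "-" + pp.Word(pp.nums)("month")
result = date.parse_string("2024-03")

print(result[0], result["year"])  # positional and named lookup via __getitem__
del result["month"]               # named deletion via __delitem__
print("month" in result)          # -> False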
@@ -376,7 +383,7 @@ class ParseResults:
        """
        self._toklist.insert(index, ins_string)
        # fixup indices in token dictionary
-        for name, occurrences in self._tokdict.items():
+        for occurrences in self._tokdict.values():
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(
                    value, position + (position > index)
@@ -652,15 +659,23 @@ class ParseResults:
        NL = "\n"
        out.append(indent + str(self.as_list()) if include_list else "")

-        if full:
+        if not full:
+            return "".join(out)

        if self.haskeys():
            items = sorted((str(k), v) for k, v in self.items())
            for k, v in items:
                if out:
                    out.append(NL)
                out.append(f"{indent}{(' ' * _depth)}- {k}: ")
-                if isinstance(v, ParseResults):
-                    if v:
+                if not isinstance(v, ParseResults):
+                    out.append(repr(v))
+                    continue
+
+                if not v:
+                    out.append(str(v))
+                    continue
+
                out.append(
                    v.dump(
                        indent=indent,
@@ -669,40 +684,26 @@ class ParseResults:
                        _depth=_depth + 1,
                    )
                )
-                    else:
-                        out.append(str(v))
-                else:
-                    out.append(repr(v))
-        if any(isinstance(vv, ParseResults) for vv in self):
+        if not any(isinstance(vv, ParseResults) for vv in self):
+            return "".join(out)
+
        v = self
+        incr = " "
+        nl = "\n"
        for i, vv in enumerate(v):
            if isinstance(vv, ParseResults):
-                out.append(
-                    "\n{}{}[{}]:\n{}{}{}".format(
-                        indent,
-                        (" " * (_depth)),
-                        i,
-                        indent,
-                        (" " * (_depth + 1)),
-                        vv.dump(
+                vv_dump = vv.dump(
                    indent=indent,
                    full=full,
                    include_list=include_list,
                    _depth=_depth + 1,
-                        ),
-                    )
+                )
+                out.append(
+                    f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv_dump}"
+                )
            else:
                out.append(
-                    "\n%s%s[%d]:\n%s%s%s"
-                    % (
-                        indent,
-                        (" " * (_depth)),
-                        i,
-                        indent,
-                        (" " * (_depth + 1)),
-                        str(vv),
-                    )
+                    f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv}"
                )

        return "".join(out)
|
@ -1,8 +1,10 @@
|
|||
# testing.py
|
||||
|
||||
from contextlib import contextmanager
|
||||
import re
|
||||
import typing
|
||||
|
||||
|
||||
from .core import (
|
||||
ParserElement,
|
||||
ParseException,
|
||||
|
@@ -49,23 +51,23 @@ class pyparsing_test:
            self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
            self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS

-            self._save_context[
-                "literal_string_class"
-            ] = ParserElement._literalStringClass
+            self._save_context["literal_string_class"] = (
+                ParserElement._literalStringClass
+            )

            self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace

            self._save_context["packrat_enabled"] = ParserElement._packratEnabled
            if ParserElement._packratEnabled:
-                self._save_context[
-                    "packrat_cache_size"
-                ] = ParserElement.packrat_cache.size
+                self._save_context["packrat_cache_size"] = (
+                    ParserElement.packrat_cache.size
+                )
            else:
                self._save_context["packrat_cache_size"] = None
            self._save_context["packrat_parse"] = ParserElement._parse
-            self._save_context[
-                "recursion_enabled"
-            ] = ParserElement._left_recursion_enabled
+            self._save_context["recursion_enabled"] = (
+                ParserElement._left_recursion_enabled
+            )

            self._save_context["__diag__"] = {
                name: getattr(__diag__, name) for name in __diag__._all_names
@@ -180,7 +182,12 @@ class pyparsing_test:
            """
            run_test_success, run_test_results = run_tests_report

-            if expected_parse_results is not None:
+            if expected_parse_results is None:
+                self.assertTrue(
+                    run_test_success, msg=msg if msg is not None else "failed runTests"
+                )
+                return
+
            merged = [
                (*rpt, expected)
                for rpt, expected in zip(run_test_results, expected_parse_results)
@@ -189,9 +196,7 @@ class pyparsing_test:
                # expected should be a tuple containing a list and/or a dict or an exception,
                # and optional failure message string
                # an empty tuple will skip any result validation
-                fail_msg = next(
-                    (exp for exp in expected if isinstance(exp, str)), None
-                )
+                fail_msg = next((exp for exp in expected if isinstance(exp, str)), None)
                expected_exception = next(
                    (
                        exp
@@ -230,9 +235,18 @@ class pyparsing_test:
            )

        @contextmanager
-        def assertRaisesParseException(self, exc_type=ParseException, msg=None):
-            with self.assertRaises(exc_type, msg=msg):
-                yield
+        def assertRaisesParseException(
+            self, exc_type=ParseException, expected_msg=None, msg=None
+        ):
+            if expected_msg is not None:
+                if isinstance(expected_msg, str):
+                    expected_msg = re.escape(expected_msg)
+                with self.assertRaisesRegex(exc_type, expected_msg, msg=msg) as ctx:
+                    yield ctx
+
+            else:
+                with self.assertRaises(exc_type, msg=msg) as ctx:
+                    yield ctx

        @staticmethod
        def with_line_numbers(
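The new expected_msg parameter lets a test assert on the exception text as well as its type: plain strings are re.escape'd and matched with assertRaisesRegex, which is why import re was added at the top of the file. A hypothetical test sketch using pyparsing's TestParseResultsAsserts mixin (the class and test names here are illustrative):

# Illustrative only: exercises the expected_msg parameter added above.
import unittest
import pyparsing as pp
from pyparsing.testing import pyparsing_test as ppt


class ParseErrorTests(ppt.TestParseResultsAsserts, unittest.TestCase):
    def test_non_numeric_input(self):
        integer = pp.Word(pp.nums).set_name("integer")
        # passes only if a ParseException is raised and its message
        # contains the escaped expected_msg text
        with self.assertRaisesParseException(expected_msg="Expected integer"):
            integer.parse_string("ABC")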
@@ -102,17 +102,10 @@ class unicode_set:
        all characters in this range that are valid identifier body characters,
        plus the digits 0-9, and · (Unicode MIDDLE DOT)
        """
-        return "".join(
-            sorted(
-                set(
-                    cls.identchars
-                    + "0123456789·"
-                    + "".join(
-                        [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()]
-                    )
-                )
-            )
-        )
+        identifier_chars = set(
+            c for c in cls._chars_for_ranges if ("_" + c).isidentifier()
+        )
+        return "".join(sorted(identifier_chars | set(cls.identchars + "0123456789·")))

    @_lazyclassproperty
    def identifier(cls):
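identifier_chars feeds the per-language identifier expressions on pyparsing_unicode, and the rewrite keeps the same character set (identchars plus digits, MIDDLE DOT, and any range character that is a valid identifier continuation). A quick sketch of how these sets are typically used (assumes pyparsing is installed):

# Build a Word expression from a unicode_set's identifier character ranges.
import pyparsing as pp

greek = pp.pyparsing_unicode.Greek
greek_identifier = pp.Word(greek.identchars, greek.identbodychars)
print(greek_identifier.parse_string("αλφα"))  # -> ['αλφα']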
@@ -237,7 +237,7 @@ def _flatten(ll: list) -> list:
    return ret


-def _make_synonym_function(compat_name: str, fn: C) -> C:
+def replaced_by_pep8(compat_name: str, fn: C) -> C:
    # In a future version, uncomment the code in the internal _inner() functions
    # to begin emitting DeprecationWarnings.
|
@ -251,7 +251,7 @@ def _make_synonym_function(compat_name: str, fn: C) -> C:
|
|||
@wraps(fn)
|
||||
def _inner(self, *args, **kwargs):
|
||||
# warnings.warn(
|
||||
# f"Deprecated - use {fn.__name__}", DeprecationWarning, stacklevel=3
|
||||
# f"Deprecated - use {fn.__name__}", DeprecationWarning, stacklevel=2
|
||||
# )
|
||||
return fn(self, *args, **kwargs)
|
||||
|
||||
|
@ -260,7 +260,7 @@ def _make_synonym_function(compat_name: str, fn: C) -> C:
|
|||
@wraps(fn)
|
||||
def _inner(*args, **kwargs):
|
||||
# warnings.warn(
|
||||
# f"Deprecated - use {fn.__name__}", DeprecationWarning, stacklevel=3
|
||||
# f"Deprecated - use {fn.__name__}", DeprecationWarning, stacklevel=2
|
||||
# )
|
||||
return fn(*args, **kwargs)
|
||||
|
||||
|
@ -275,10 +275,3 @@ def _make_synonym_function(compat_name: str, fn: C) -> C:
|
|||
_inner.__kwdefaults__ = None
|
||||
_inner.__qualname__ = fn.__qualname__
|
||||
return cast(C, _inner)
|
||||
|
||||
|
||||
def replaced_by_pep8(fn: C) -> Callable[[Callable], C]:
|
||||
"""
|
||||
Decorator for pre-PEP8 compatibility synonyms, to link them to the new function.
|
||||
"""
|
||||
return lambda other: _make_synonym_function(other.__name__, fn)
|
||||
|
|
|
@@ -32,7 +32,7 @@ plexapi==4.15.10
portend==3.2.0
profilehooks==1.12.0
PyJWT==2.8.0
-pyparsing==3.1.1
+pyparsing==3.1.2
python-dateutil==2.9.0.post0
python-twitter==3.5
pytz==2024.1