Mirror of https://github.com/Tautulli/Tautulli.git, synced 2025-07-14 01:02:59 -07:00
Bump tokenize-rt from 4.2.1 to 5.0.0 (#1889)
* Bump tokenize-rt from 4.2.1 to 5.0.0

Bumps [tokenize-rt](https://github.com/asottile/tokenize-rt) from 4.2.1 to 5.0.0.
- [Release notes](https://github.com/asottile/tokenize-rt/releases)
- [Commits](https://github.com/asottile/tokenize-rt/compare/v4.2.1...v5.0.0)

---
updated-dependencies:
- dependency-name: tokenize-rt
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update tokenize-rt==5.0.0

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: JonnyWong16 <9099342+JonnyWong16@users.noreply.github.com>

[skip ci]
This commit is contained in:
parent 6365327595
commit 7f132439be

2 changed files with 18 additions and 46 deletions

lib/tokenize_rt.py

@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import argparse
 import io
 import keyword
@@ -6,20 +8,17 @@ import sys
 import tokenize
 from typing import Generator
 from typing import Iterable
-from typing import List
 from typing import NamedTuple
-from typing import Optional
 from typing import Pattern
 from typing import Sequence
-from typing import Tuple
 
 # this is a performance hack. see https://bugs.python.org/issue43014
-if (
+if (  # pragma: no branch
         sys.version_info < (3, 10) and
         callable(getattr(tokenize, '_compile', None))
-):  # pragma: no cover (<py310)
+):  # pragma: <3.10 cover
     from functools import lru_cache
-    tokenize._compile = lru_cache()(tokenize._compile)  # type: ignore
+    tokenize._compile = lru_cache()(tokenize._compile)
 
 ESCAPED_NL = 'ESCAPED_NL'
 UNIMPORTANT_WS = 'UNIMPORTANT_WS'
@@ -27,15 +26,15 @@ NON_CODING_TOKENS = frozenset(('COMMENT', ESCAPED_NL, 'NL', UNIMPORTANT_WS))
 
 
 class Offset(NamedTuple):
-    line: Optional[int] = None
-    utf8_byte_offset: Optional[int] = None
+    line: int | None = None
+    utf8_byte_offset: int | None = None
 
 
 class Token(NamedTuple):
     name: str
     src: str
-    line: Optional[int] = None
-    utf8_byte_offset: Optional[int] = None
+    line: int | None = None
+    utf8_byte_offset: int | None = None
 
     @property
     def offset(self) -> Offset:
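
As a side note (an illustration, not part of this commit): the two NamedTuples above are the public data types of tokenize-rt, and Token.offset simply packs the position fields into an Offset. A minimal sketch, assuming tokenize-rt is installed:

# Illustration only: comparing a token's position via its Offset.
from tokenize_rt import Offset, Token

tok = Token('NAME', 'print', line=1, utf8_byte_offset=0)
assert tok.offset == Offset(line=1, utf8_byte_offset=0)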
@@ -43,11 +42,10 @@ class Token(NamedTuple):
 
 
 _string_re = re.compile('^([^\'"]*)(.*)$', re.DOTALL)
-_string_prefixes = frozenset('bfru')
 _escaped_nl_re = re.compile(r'\\(\n|\r\n|\r)')
 
 
-def _re_partition(regex: Pattern[str], s: str) -> Tuple[str, str, str]:
+def _re_partition(regex: Pattern[str], s: str) -> tuple[str, str, str]:
     match = regex.search(s)
     if match:
         return s[:match.start()], s[slice(*match.span())], s[match.end():]
@@ -55,7 +53,7 @@ def _re_partition(regex: Pattern[str], s: str) -> Tuple[str, str, str]:
         return (s, '', '')
 
 
-def src_to_tokens(src: str) -> List[Token]:
+def src_to_tokens(src: str) -> list[Token]:
     tokenize_target = io.StringIO(src)
     lines = ('',) + tuple(tokenize_target)
 
@@ -98,33 +96,7 @@ def src_to_tokens(src: str) -> List[Token]:
             end_offset += len(newtok.encode())
 
         tok_name = tokenize.tok_name[tok_type]
-        # when a string prefix is not recognized, the tokenizer produces a
-        # NAME token followed by a STRING token
-        if (
-                tok_name == 'STRING' and
-                tokens and
-                tokens[-1].name == 'NAME' and
-                frozenset(tokens[-1].src.lower()) <= _string_prefixes
-        ):
-            newsrc = tokens[-1].src + tok_text
-            tokens[-1] = tokens[-1]._replace(src=newsrc, name=tok_name)
-        # produce octal literals as a single token in python 3 as well
-        elif (
-                tok_name == 'NUMBER' and
-                tokens and
-                tokens[-1].name == 'NUMBER'
-        ):
-            tokens[-1] = tokens[-1]._replace(src=tokens[-1].src + tok_text)
-        # produce long literals as a single token in python 3 as well
-        elif (
-                tok_name == 'NAME' and
-                tok_text.lower() == 'l' and
-                tokens and
-                tokens[-1].name == 'NUMBER'
-        ):
-            tokens[-1] = tokens[-1]._replace(src=tokens[-1].src + tok_text)
-        else:
-            tokens.append(Token(tok_name, tok_text, sline, end_offset))
+        tokens.append(Token(tok_name, tok_text, sline, end_offset))
 
         last_line, last_col = eline, ecol
         if sline != eline:
             end_offset = len(lines[last_line][:last_col].encode())
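
For context (an illustration, not part of this commit): on the Python 3 versions in use here, the stdlib tokenizer already emits a recognized prefixed string literal as a single STRING token rather than a NAME token followed by a STRING token, which is presumably why the merging removed above is no longer needed. A quick check:

import io
import tokenize

# rb'x' comes back as one STRING token, not NAME + STRING
toks = list(tokenize.generate_tokens(io.StringIO("rb'x'\n").readline))
assert toks[0].type == tokenize.STRING
assert toks[0].string == "rb'x'"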
@@ -140,19 +112,19 @@ def tokens_to_src(tokens: Iterable[Token]) -> str:
 
 def reversed_enumerate(
         tokens: Sequence[Token],
-) -> Generator[Tuple[int, Token], None, None]:
+) -> Generator[tuple[int, Token], None, None]:
     for i in reversed(range(len(tokens))):
         yield i, tokens[i]
 
 
-def parse_string_literal(src: str) -> Tuple[str, str]:
+def parse_string_literal(src: str) -> tuple[str, str]:
     """parse a string literal's source into (prefix, string)"""
     match = _string_re.match(src)
     assert match is not None
     return match.group(1), match.group(2)
 
 
-def rfind_string_parts(tokens: Sequence[Token], i: int) -> Tuple[int, ...]:
+def rfind_string_parts(tokens: Sequence[Token], i: int) -> tuple[int, ...]:
     """find the indicies of the string parts of a (joined) string literal
 
     - `i` should start at the end of the string literal
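
A quick illustration (not from the diff) of the parse_string_literal helper shown above, assuming tokenize-rt is installed: it splits a literal's source into its prefix and the quoted part.

from tokenize_rt import parse_string_literal

prefix, string = parse_string_literal("f'hi'")
assert (prefix, string) == ('f', "'hi'")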
@@ -195,7 +167,7 @@ def rfind_string_parts(tokens: Sequence[Token], i: int) -> Tuple[int, ...]:
     return tuple(reversed(ret))
 
 
-def main(argv: Optional[Sequence[str]] = None) -> int:
+def main(argv: Sequence[str] | None = None) -> int:
     parser = argparse.ArgumentParser()
     parser.add_argument('filename')
     args = parser.parse_args(argv)
@@ -210,4 +182,4 @@ def main(argv: Optional[Sequence[str]] = None) -> int:
 
 
 if __name__ == '__main__':
-    exit(main())
+    raise SystemExit(main())
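
The hunks above modernize the type hints and simplify token handling; the module's round-trip behaviour is unchanged. For orientation, a minimal sketch of the API this file provides, assuming tokenize-rt 5.0.0 is installed:

from tokenize_rt import src_to_tokens, tokens_to_src

src = 'x = 1  # comment\n'
tokens = src_to_tokens(src)
# a no-op pass: lexing and re-joining reproduces the source exactly
assert tokens_to_src(tokens) == src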
requirements.txt

@@ -42,7 +42,7 @@ simplejson==3.17.6
 six==1.16.0
 soupsieve==2.3.2.post1
 tempora==5.0.2
-tokenize-rt==4.2.1
+tokenize-rt==5.0.0
 tzdata==2022.6
 tzlocal==4.2
 urllib3==1.26.12
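
A quick, hypothetical way to confirm which version an environment actually resolved after this bump (not part of the commit):

from importlib.metadata import version

print(version('tokenize-rt'))  # expected to print 5.0.0 with the pin above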