Mirror of https://github.com/Tautulli/Tautulli.git
Bump tokenize-rt from 6.0.0 to 6.1.0 (#2436)
* Bump tokenize-rt from 6.0.0 to 6.1.0

Bumps [tokenize-rt](https://github.com/asottile/tokenize-rt) from 6.0.0 to 6.1.0.
- [Commits](https://github.com/asottile/tokenize-rt/compare/v6.0.0...v6.1.0)

---
updated-dependencies:
- dependency-name: tokenize-rt
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update tokenize-rt==6.1.0

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: JonnyWong16 <9099342+JonnyWong16@users.noreply.github.com>

[skip ci]
commit 2fe3f039cc
parent baf926e5db
2 changed files with 19 additions and 7 deletions
@@ -6,11 +6,11 @@ import keyword
 import re
 import sys
 import tokenize
-from typing import Generator
-from typing import Iterable
+from collections.abc import Generator
+from collections.abc import Iterable
+from collections.abc import Sequence
+from re import Pattern
 from typing import NamedTuple
-from typing import Pattern
-from typing import Sequence

 # this is a performance hack. see https://bugs.python.org/issue43014
 if (  # pragma: no branch
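This hunk and the next two evidently target Tautulli's vendored copy of the tokenize_rt module (this view omits file paths); the final hunk updates the requirements pin. The import reshuffle tracks upstream housekeeping: typing.Generator, typing.Iterable, and typing.Sequence have been deprecated aliases of their collections.abc counterparts since Python 3.9, and typing.Pattern was removed entirely in Python 3.12, leaving re.Pattern as the supported spelling. A minimal sketch of the replacement style; the find_first helper is hypothetical, purely for illustration:

    import re
    from collections.abc import Iterable
    from re import Pattern

    # re.Pattern replaces the removed typing.Pattern, and collections.abc
    # supplies the container types that typing merely aliased.
    def find_first(pattern: Pattern[str], lines: Iterable[str]) -> str | None:
        for line in lines:
            if pattern.search(line):
                return line
        return None

    print(find_first(re.compile(r'==6\.1\.0$'), ['tokenize-rt==6.1.0']))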
@@ -47,6 +47,16 @@ class Token(NamedTuple):
 _string_re = re.compile('^([^\'"]*)(.*)$', re.DOTALL)
 _escaped_nl_re = re.compile(r'\\(\n|\r\n|\r)')

+NAMED_UNICODE_RE = re.compile(r'(?<!\\)(?:\\\\)*(\\N\{[^}]+\})')
+
+
+def curly_escape(s: str) -> str:
+    parts = NAMED_UNICODE_RE.split(s)
+    return ''.join(
+        part.replace('{', '{{').replace('}', '}}') if i % 2 == 0 else part
+        for i, part in enumerate(parts)
+    )
+

 def _re_partition(regex: Pattern[str], s: str) -> tuple[str, str, str]:
     match = regex.search(s)
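The new curly_escape helper doubles literal braces so text can be re-embedded in an f-string, but NAMED_UNICODE_RE.split keeps \N{...} named-unicode escapes intact: with one capture group, re.split alternates non-matching segments (even indices, which get escaped) with the captured escapes (odd indices, passed through unchanged). A quick illustration; the expected outputs in the comments follow directly from the code above:

    # Literal braces are doubled...
    print(curly_escape('{x}'))            # {{x}}
    # ...but the braces of a named-unicode escape are preserved.
    print(curly_escape('\\N{BULLET} {'))  # \N{BULLET} {{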
@@ -101,8 +111,10 @@ def src_to_tokens(src: str) -> list[Token]:
         tok_name = tokenize.tok_name[tok_type]

         if tok_name == 'FSTRING_MIDDLE':  # pragma: >=3.12 cover
-            ecol += tok_text.count('{') + tok_text.count('}')
-            tok_text = tok_text.replace('{', '{{').replace('}', '}}')
+            if '{' in tok_text or '}' in tok_text:
+                new_tok_text = curly_escape(tok_text)
+                ecol += len(new_tok_text) - len(tok_text)
+                tok_text = new_tok_text

         tokens.append(Token(tok_name, tok_text, sline, end_offset))
         last_line, last_col = eline, ecol
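This hunk is the substance of the 6.1.0 release: on Python 3.12+, f-strings tokenize into FSTRING_MIDDLE pieces whose braces must be re-escaped so that tokens_to_src can reproduce the original source. The 6.0.0 code escaped unconditionally, which also doubled the braces inside \N{...} escapes and broke the round trip. A sanity check of the fixed behavior, assuming tokenize-rt >= 6.1.0 on Python >= 3.12:

    from tokenize_rt import src_to_tokens, tokens_to_src

    # This f-string mixes a named-unicode escape with a real replacement
    # field; with 6.1.0 the token round trip is lossless.
    src = 'f"\\N{BULLET} {value}"\n'
    assert tokens_to_src(src_to_tokens(src)) == src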
@@ -39,7 +39,7 @@ rumps==0.4.0; platform_system == "Darwin"
 simplejson==3.19.3
 six==1.16.0
 tempora==5.7.0
-tokenize-rt==6.0.0
+tokenize-rt==6.1.0
 tzdata==2024.2
 tzlocal==5.0.1
 urllib3<2
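With the pin updated in requirements.txt, a local environment can be checked against the new version using only the standard library (this assumes a pip-installed tokenize-rt rather than the vendored copy):

    from importlib.metadata import version

    print(version('tokenize-rt'))  # expect '6.1.0' after the bump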