From 2fe3f039cc8f39cd7365be9c2e037345ac8db55b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Nov 2024 09:59:58 -0800 Subject: [PATCH] Bump tokenize-rt from 6.0.0 to 6.1.0 (#2436) * Bump tokenize-rt from 6.0.0 to 6.1.0 Bumps [tokenize-rt](https://github.com/asottile/tokenize-rt) from 6.0.0 to 6.1.0. - [Commits](https://github.com/asottile/tokenize-rt/compare/v6.0.0...v6.1.0) --- updated-dependencies: - dependency-name: tokenize-rt dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] * Update tokenize-rt==6.1.0 --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: JonnyWong16 <9099342+JonnyWong16@users.noreply.github.com> [skip ci] --- lib/tokenize_rt.py | 24 ++++++++++++++++++------ requirements.txt | 2 +- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/lib/tokenize_rt.py b/lib/tokenize_rt.py index 9f12e01f..f909ead7 100644 --- a/lib/tokenize_rt.py +++ b/lib/tokenize_rt.py @@ -6,11 +6,11 @@ import keyword import re import sys import tokenize -from typing import Generator -from typing import Iterable +from collections.abc import Generator +from collections.abc import Iterable +from collections.abc import Sequence +from re import Pattern from typing import NamedTuple -from typing import Pattern -from typing import Sequence # this is a performance hack. see https://bugs.python.org/issue43014 if ( # pragma: no branch @@ -47,6 +47,16 @@ class Token(NamedTuple): _string_re = re.compile('^([^\'"]*)(.*)$', re.DOTALL) _escaped_nl_re = re.compile(r'\\(\n|\r\n|\r)') +NAMED_UNICODE_RE = re.compile(r'(? 
<!\\)(?:\\\\)*(\\N\{[^}]+\})') + + +def curly_escape(s: str) -> str: + parts = NAMED_UNICODE_RE.split(s) + return ''.join( + part.replace('{', '{{').replace('}', '}}') if i % 2 == 0 else part + for i, part in enumerate(parts) + ) + def _re_partition(regex: Pattern[str], s: str) -> tuple[str, str, str]: match = regex.search(s) @@ -101,8 +111,10 @@ def src_to_tokens(src: str) -> list[Token]: tok_name = tokenize.tok_name[tok_type] if tok_name == 'FSTRING_MIDDLE': # pragma: >=3.12 cover - ecol += tok_text.count('{') + tok_text.count('}') - tok_text = tok_text.replace('{', '{{').replace('}', '}}') + if '{' in tok_text or '}' in tok_text: + new_tok_text = curly_escape(tok_text) + ecol += len(new_tok_text) - len(tok_text) + tok_text = new_tok_text tokens.append(Token(tok_name, tok_text, sline, end_offset)) last_line, last_col = eline, ecol diff --git a/requirements.txt b/requirements.txt index e91d999f..01bdd50d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -39,7 +39,7 @@ rumps==0.4.0; platform_system == "Darwin" simplejson==3.19.3 six==1.16.0 tempora==5.7.0 -tokenize-rt==6.0.0 +tokenize-rt==6.1.0 tzdata==2024.2 tzlocal==5.0.1 urllib3<2