Bump dnspython from 2.2.1 to 2.3.0 (#1975)

* Bump dnspython from 2.2.1 to 2.3.0

Bumps [dnspython](https://github.com/rthalley/dnspython) from 2.2.1 to 2.3.0.
- [Release notes](https://github.com/rthalley/dnspython/releases)
- [Changelog](https://github.com/rthalley/dnspython/blob/master/doc/whatsnew.rst)
- [Commits](https://github.com/rthalley/dnspython/compare/v2.2.1...v2.3.0)

---
updated-dependencies:
- dependency-name: dnspython
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update dnspython==2.3.0

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: JonnyWong16 <9099342+JonnyWong16@users.noreply.github.com>

[skip ci]
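After the upgrade, the installed version can be confirmed from Python (a quick
check for reference, not part of this commit):

    import dns.version

    # dnspython exposes its version string as dns.version.version;
    # after this bump it should report 2.3.0.
    print(dns.version.version)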
commit 32c06a8b72 (parent 6910079330)
Author: dependabot[bot], committed by GitHub
Date:   2023-03-02 20:54:32 -08:00
137 changed files with 7699 additions and 4277 deletions
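Most of those changes follow the same two upstream patterns seen in the
tokenizer diff below (dnspython's dns/tokenizer.py module): dnspython 2.3.0
reformatted the code (single quotes become double quotes, long calls are
re-wrapped) and added type annotations to the public API. A minimal sketch of
what the annotations enable, assuming mypy as the type checker (the tool
choice is an assumption, not part of this commit):

    import dns.tokenizer

    tok = dns.tokenizer.Tokenizer("3600\n")
    # Tokenizer.get_ttl() is now annotated as returning int, so a static
    # checker can flag this assignment; runtime behavior is unchanged.
    ttl: str = tok.get_ttl()  # mypy error: int is not assignable to str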

@@ -17,6 +17,8 @@
 
 """Tokenize DNS zone file format"""
 
+from typing import Any, Optional, List, Tuple
+
 import io
 import sys
 
@@ -24,7 +26,7 @@ import dns.exception
 import dns.name
 import dns.ttl
 
-_DELIMITERS = {' ', '\t', '\n', ';', '(', ')', '"'}
+_DELIMITERS = {" ", "\t", "\n", ";", "(", ")", '"'}
 _QUOTING_DELIMITERS = {'"'}
 
 EOF = 0
@@ -48,7 +50,13 @@ class Token:
     has_escape: Does the token value contain escapes?
     """
 
-    def __init__(self, ttype, value='', has_escape=False, comment=None):
+    def __init__(
+        self,
+        ttype: int,
+        value: Any = "",
+        has_escape: bool = False,
+        comment: Optional[str] = None,
+    ):
         """Initialize a token instance."""
 
         self.ttype = ttype
@@ -56,55 +64,53 @@ class Token:
         self.has_escape = has_escape
         self.comment = comment
 
-    def is_eof(self):
+    def is_eof(self) -> bool:
         return self.ttype == EOF
 
-    def is_eol(self):
+    def is_eol(self) -> bool:
         return self.ttype == EOL
 
-    def is_whitespace(self):
+    def is_whitespace(self) -> bool:
         return self.ttype == WHITESPACE
 
-    def is_identifier(self):
+    def is_identifier(self) -> bool:
         return self.ttype == IDENTIFIER
 
-    def is_quoted_string(self):
+    def is_quoted_string(self) -> bool:
         return self.ttype == QUOTED_STRING
 
-    def is_comment(self):
+    def is_comment(self) -> bool:
         return self.ttype == COMMENT
 
-    def is_delimiter(self):  # pragma: no cover (we don't return delimiters yet)
+    def is_delimiter(self) -> bool:  # pragma: no cover (we don't return delimiters yet)
         return self.ttype == DELIMITER
 
-    def is_eol_or_eof(self):
+    def is_eol_or_eof(self) -> bool:
         return self.ttype == EOL or self.ttype == EOF
 
     def __eq__(self, other):
         if not isinstance(other, Token):
             return False
-        return (self.ttype == other.ttype and
-                self.value == other.value)
+        return self.ttype == other.ttype and self.value == other.value
 
     def __ne__(self, other):
         if not isinstance(other, Token):
             return True
-        return (self.ttype != other.ttype or
-                self.value != other.value)
+        return self.ttype != other.ttype or self.value != other.value
 
     def __str__(self):
         return '%d "%s"' % (self.ttype, self.value)
 
-    def unescape(self):
+    def unescape(self) -> "Token":
         if not self.has_escape:
             return self
-        unescaped = ''
+        unescaped = ""
         l = len(self.value)
         i = 0
         while i < l:
             c = self.value[i]
             i += 1
-            if c == '\\':
+            if c == "\\":
                 if i >= l:  # pragma: no cover (can't happen via get())
                     raise dns.exception.UnexpectedEnd
                 c = self.value[i]
@@ -127,7 +133,7 @@ class Token:
                 unescaped += c
         return Token(self.ttype, unescaped)
 
-    def unescape_to_bytes(self):
+    def unescape_to_bytes(self) -> "Token":
         # We used to use unescape() for TXT-like records, but this
         # caused problems as we'd process DNS escapes into Unicode code
         # points instead of byte values, and then a to_text() of the
@@ -152,13 +158,13 @@ class Token:
         #
         # foo\226\128\139bar
         #
-        unescaped = b''
+        unescaped = b""
         l = len(self.value)
         i = 0
         while i < l:
             c = self.value[i]
             i += 1
-            if c == '\\':
+            if c == "\\":
                 if i >= l:  # pragma: no cover (can't happen via get())
                     raise dns.exception.UnexpectedEnd
                 c = self.value[i]
@@ -177,7 +183,7 @@ class Token:
                     codepoint = int(c) * 100 + int(c2) * 10 + int(c3)
                     if codepoint > 255:
                         raise dns.exception.SyntaxError
-                    unescaped += b'%c' % (codepoint)
+                    unescaped += b"%c" % (codepoint)
                 else:
                     # Note that as mentioned above, if c is a Unicode
                     # code point outside of the ASCII range, then this
@@ -223,7 +229,12 @@ class Tokenizer:
     encoder/decoder is used.
     """
 
-    def __init__(self, f=sys.stdin, filename=None, idna_codec=None):
+    def __init__(
+        self,
+        f: Any = sys.stdin,
+        filename: Optional[str] = None,
+        idna_codec: Optional[dns.name.IDNACodec] = None,
+    ):
         """Initialize a tokenizer instance.
 
         f: The file to tokenize. The default is sys.stdin.
@@ -241,49 +252,50 @@ class Tokenizer:
         if isinstance(f, str):
             f = io.StringIO(f)
             if filename is None:
-                filename = '<string>'
+                filename = "<string>"
         elif isinstance(f, bytes):
             f = io.StringIO(f.decode())
             if filename is None:
-                filename = '<string>'
+                filename = "<string>"
         else:
             if filename is None:
                 if f is sys.stdin:
-                    filename = '<stdin>'
+                    filename = "<stdin>"
                 else:
-                    filename = '<file>'
+                    filename = "<file>"
         self.file = f
-        self.ungotten_char = None
-        self.ungotten_token = None
+        self.ungotten_char: Optional[str] = None
+        self.ungotten_token: Optional[Token] = None
         self.multiline = 0
         self.quoting = False
         self.eof = False
         self.delimiters = _DELIMITERS
         self.line_number = 1
+        assert filename is not None
         self.filename = filename
         if idna_codec is None:
-            idna_codec = dns.name.IDNA_2003
-        self.idna_codec = idna_codec
+            self.idna_codec: dns.name.IDNACodec = dns.name.IDNA_2003
+        else:
+            self.idna_codec = idna_codec
 
-    def _get_char(self):
-        """Read a character from input.
-        """
+    def _get_char(self) -> str:
+        """Read a character from input."""
 
         if self.ungotten_char is None:
             if self.eof:
-                c = ''
+                c = ""
             else:
                 c = self.file.read(1)
-                if c == '':
+                if c == "":
                     self.eof = True
-                elif c == '\n':
+                elif c == "\n":
                     self.line_number += 1
         else:
            c = self.ungotten_char
            self.ungotten_char = None
         return c
 
-    def where(self):
+    def where(self) -> Tuple[str, int]:
         """Return the current location in the input.
 
         Returns a (string, int) tuple. The first item is the filename of
@@ -292,7 +304,7 @@ class Tokenizer:
         return (self.filename, self.line_number)
 
-    def _unget_char(self, c):
+    def _unget_char(self, c: str) -> None:
         """Unget a character.
 
         The unget buffer for characters is only one character large; it is
@@ -308,7 +320,7 @@ class Tokenizer:
             raise UngetBufferFull  # pragma: no cover
         self.ungotten_char = c
 
-    def skip_whitespace(self):
+    def skip_whitespace(self) -> int:
         """Consume input until a non-whitespace character is encountered.
 
         The non-whitespace character is then ungotten, and the number of
@@ -322,13 +334,13 @@ class Tokenizer:
         skipped = 0
         while True:
             c = self._get_char()
-            if c != ' ' and c != '\t':
-                if (c != '\n') or not self.multiline:
+            if c != " " and c != "\t":
+                if (c != "\n") or not self.multiline:
                     self._unget_char(c)
                     return skipped
             skipped += 1
 
-    def get(self, want_leading=False, want_comment=False):
+    def get(self, want_leading: bool = False, want_comment: bool = False) -> Token:
         """Get the next token.
 
         want_leading: If True, return a WHITESPACE token if the
@@ -345,33 +357,33 @@
         """
 
         if self.ungotten_token is not None:
-            token = self.ungotten_token
+            utoken = self.ungotten_token
             self.ungotten_token = None
-            if token.is_whitespace():
+            if utoken.is_whitespace():
                 if want_leading:
-                    return token
-            elif token.is_comment():
+                    return utoken
+            elif utoken.is_comment():
                 if want_comment:
-                    return token
+                    return utoken
             else:
-                return token
+                return utoken
         skipped = self.skip_whitespace()
         if want_leading and skipped > 0:
-            return Token(WHITESPACE, ' ')
-        token = ''
+            return Token(WHITESPACE, " ")
+        token = ""
         ttype = IDENTIFIER
         has_escape = False
         while True:
             c = self._get_char()
-            if c == '' or c in self.delimiters:
-                if c == '' and self.quoting:
+            if c == "" or c in self.delimiters:
+                if c == "" and self.quoting:
                     raise dns.exception.UnexpectedEnd
-                if token == '' and ttype != QUOTED_STRING:
-                    if c == '(':
+                if token == "" and ttype != QUOTED_STRING:
+                    if c == "(":
                         self.multiline += 1
                         self.skip_whitespace()
                         continue
-                    elif c == ')':
+                    elif c == ")":
                         if self.multiline <= 0:
                             raise dns.exception.SyntaxError
                         self.multiline -= 1
@@ -388,28 +400,29 @@ class Tokenizer:
                             self.delimiters = _DELIMITERS
                             self.skip_whitespace()
                             continue
-                    elif c == '\n':
-                        return Token(EOL, '\n')
-                    elif c == ';':
+                    elif c == "\n":
+                        return Token(EOL, "\n")
+                    elif c == ";":
                         while 1:
                             c = self._get_char()
-                            if c == '\n' or c == '':
+                            if c == "\n" or c == "":
                                 break
                             token += c
                         if want_comment:
                             self._unget_char(c)
                             return Token(COMMENT, token)
-                        elif c == '':
+                        elif c == "":
                             if self.multiline:
                                 raise dns.exception.SyntaxError(
-                                    'unbalanced parentheses')
+                                    "unbalanced parentheses"
+                                )
                             return Token(EOF, comment=token)
                         elif self.multiline:
                             self.skip_whitespace()
-                            token = ''
+                            token = ""
                             continue
                         else:
-                            return Token(EOL, '\n', comment=token)
+                            return Token(EOL, "\n", comment=token)
                 else:
                     # This code exists in case we ever want a
                     # delimiter to be returned. It never produces
@@ -419,9 +432,9 @@
                     else:
                         self._unget_char(c)
                         break
-            elif self.quoting and c == '\n':
-                raise dns.exception.SyntaxError('newline in quoted string')
-            elif c == '\\':
+            elif self.quoting and c == "\n":
+                raise dns.exception.SyntaxError("newline in quoted string")
+            elif c == "\\":
                 #
                 # It's an escape. Put it and the next character into
                 # the token; it will be checked later for goodness.
@@ -429,16 +442,16 @@
                 token += c
                 has_escape = True
                 c = self._get_char()
-                if c == '' or (c == '\n' and not self.quoting):
+                if c == "" or (c == "\n" and not self.quoting):
                     raise dns.exception.UnexpectedEnd
             token += c
-        if token == '' and ttype != QUOTED_STRING:
+        if token == "" and ttype != QUOTED_STRING:
             if self.multiline:
-                raise dns.exception.SyntaxError('unbalanced parentheses')
+                raise dns.exception.SyntaxError("unbalanced parentheses")
             ttype = EOF
         return Token(ttype, token, has_escape)
 
-    def unget(self, token):
+    def unget(self, token: Token) -> None:
         """Unget a token.
 
         The unget buffer for tokens is only one token large; it is
@@ -472,7 +485,7 @@
 
     # Helpers
 
-    def get_int(self, base=10):
+    def get_int(self, base: int = 10) -> int:
         """Read the next token and interpret it as an unsigned integer.
 
         Raises dns.exception.SyntaxError if not an unsigned integer.
@@ -482,12 +495,12 @@
         token = self.get().unescape()
         if not token.is_identifier():
-            raise dns.exception.SyntaxError('expecting an identifier')
+            raise dns.exception.SyntaxError("expecting an identifier")
         if not token.value.isdigit():
-            raise dns.exception.SyntaxError('expecting an integer')
+            raise dns.exception.SyntaxError("expecting an integer")
         return int(token.value, base)
 
-    def get_uint8(self):
+    def get_uint8(self) -> int:
         """Read the next token and interpret it as an 8-bit unsigned
         integer.
 
@@ -499,10 +512,11 @@
         value = self.get_int()
         if value < 0 or value > 255:
             raise dns.exception.SyntaxError(
-                '%d is not an unsigned 8-bit integer' % value)
+                "%d is not an unsigned 8-bit integer" % value
+            )
         return value
 
-    def get_uint16(self, base=10):
+    def get_uint16(self, base: int = 10) -> int:
         """Read the next token and interpret it as a 16-bit unsigned
         integer.
 
@@ -515,13 +529,15 @@
         if value < 0 or value > 65535:
             if base == 8:
                 raise dns.exception.SyntaxError(
-                    '%o is not an octal unsigned 16-bit integer' % value)
+                    "%o is not an octal unsigned 16-bit integer" % value
+                )
             else:
                 raise dns.exception.SyntaxError(
-                    '%d is not an unsigned 16-bit integer' % value)
+                    "%d is not an unsigned 16-bit integer" % value
+                )
         return value
 
-    def get_uint32(self, base=10):
+    def get_uint32(self, base: int = 10) -> int:
         """Read the next token and interpret it as a 32-bit unsigned
         integer.
 
@@ -533,10 +549,11 @@
         value = self.get_int(base=base)
         if value < 0 or value > 4294967295:
             raise dns.exception.SyntaxError(
-                '%d is not an unsigned 32-bit integer' % value)
+                "%d is not an unsigned 32-bit integer" % value
+            )
         return value
 
-    def get_uint48(self, base=10):
+    def get_uint48(self, base: int = 10) -> int:
         """Read the next token and interpret it as a 48-bit unsigned
         integer.
 
@@ -548,10 +565,11 @@
         value = self.get_int(base=base)
         if value < 0 or value > 281474976710655:
             raise dns.exception.SyntaxError(
-                '%d is not an unsigned 48-bit integer' % value)
+                "%d is not an unsigned 48-bit integer" % value
+            )
         return value
 
-    def get_string(self, max_length=None):
+    def get_string(self, max_length: Optional[int] = None) -> str:
         """Read the next token and interpret it as a string.
 
         Raises dns.exception.SyntaxError if not a string.
@@ -563,12 +581,12 @@
         token = self.get().unescape()
         if not (token.is_identifier() or token.is_quoted_string()):
-            raise dns.exception.SyntaxError('expecting a string')
+            raise dns.exception.SyntaxError("expecting a string")
         if max_length and len(token.value) > max_length:
             raise dns.exception.SyntaxError("string too long")
         return token.value
 
-    def get_identifier(self):
+    def get_identifier(self) -> str:
         """Read the next token, which should be an identifier.
 
         Raises dns.exception.SyntaxError if not an identifier.
@@ -578,10 +596,10 @@
         token = self.get().unescape()
         if not token.is_identifier():
-            raise dns.exception.SyntaxError('expecting an identifier')
+            raise dns.exception.SyntaxError("expecting an identifier")
         return token.value
 
-    def get_remaining(self, max_tokens=None):
+    def get_remaining(self, max_tokens: Optional[int] = None) -> List[Token]:
         """Return the remaining tokens on the line, until an EOL or EOF is seen.
 
         max_tokens: If not None, stop after this number of tokens.
@@ -600,7 +618,7 @@
                 break
         return tokens
 
-    def concatenate_remaining_identifiers(self, allow_empty=False):
+    def concatenate_remaining_identifiers(self, allow_empty: bool = False) -> str:
         """Read the remaining tokens on the line, which should be identifiers.
 
         Raises dns.exception.SyntaxError if there are no remaining tokens,
@@ -622,10 +640,16 @@
                 raise dns.exception.SyntaxError
             s += token.value
         if not (allow_empty or s):
-            raise dns.exception.SyntaxError('expecting another identifier')
+            raise dns.exception.SyntaxError("expecting another identifier")
         return s
 
-    def as_name(self, token, origin=None, relativize=False, relativize_to=None):
+    def as_name(
+        self,
+        token: Token,
+        origin: Optional[dns.name.Name] = None,
+        relativize: bool = False,
+        relativize_to: Optional[dns.name.Name] = None,
+    ) -> dns.name.Name:
         """Try to interpret the token as a DNS name.
 
         Raises dns.exception.SyntaxError if not a name.
@@ -633,11 +657,16 @@
         Returns a dns.name.Name.
         """
 
         if not token.is_identifier():
-            raise dns.exception.SyntaxError('expecting an identifier')
+            raise dns.exception.SyntaxError("expecting an identifier")
         name = dns.name.from_text(token.value, origin, self.idna_codec)
         return name.choose_relativity(relativize_to or origin, relativize)
 
-    def get_name(self, origin=None, relativize=False, relativize_to=None):
+    def get_name(
+        self,
+        origin: Optional[dns.name.Name] = None,
+        relativize: bool = False,
+        relativize_to: Optional[dns.name.Name] = None,
+    ) -> dns.name.Name:
         """Read the next token and interpret it as a DNS name.
 
         Raises dns.exception.SyntaxError if not a name.
@@ -648,7 +677,7 @@
         token = self.get()
         return self.as_name(token, origin, relativize, relativize_to)
 
-    def get_eol_as_token(self):
+    def get_eol_as_token(self) -> Token:
         """Read the next token and raise an exception if it isn't EOL or
         EOF.
 
@@ -658,14 +687,14 @@
         token = self.get()
         if not token.is_eol_or_eof():
             raise dns.exception.SyntaxError(
-                'expected EOL or EOF, got %d "%s"' % (token.ttype,
-                                                      token.value))
+                'expected EOL or EOF, got %d "%s"' % (token.ttype, token.value)
+            )
         return token
 
-    def get_eol(self):
+    def get_eol(self) -> str:
         return self.get_eol_as_token().value
 
-    def get_ttl(self):
+    def get_ttl(self) -> int:
         """Read the next token and interpret it as a DNS TTL.
 
         Raises dns.exception.SyntaxError or dns.ttl.BadTTL if not an
@@ -676,5 +705,5 @@
         token = self.get().unescape()
         if not token.is_identifier():
-            raise dns.exception.SyntaxError('expecting an identifier')
+            raise dns.exception.SyntaxError("expecting an identifier")
         return dns.ttl.from_text(token.value)
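
For reference, a short sketch of the tokenizer API annotated above; this
example is illustrative only (not part of the commit) and uses only methods
visible in this diff:

    import dns.tokenizer

    # Passing a str wraps it in io.StringIO and reports the filename as
    # "<string>", per Tokenizer.__init__ above.
    tok = dns.tokenizer.Tokenizer("www 3600 IN A 192.0.2.1\n")

    name = tok.get_identifier()     # "www"
    ttl = tok.get_ttl()             # 3600, parsed by dns.ttl.from_text()
    rdclass = tok.get_identifier()  # "IN"
    rdtype = tok.get_identifier()   # "A"
    address = tok.get_string()      # "192.0.2.1"
    tok.get_eol()                   # consume the trailing EOL

Each getter wraps get() and raises dns.exception.SyntaxError when the token
type does not match, which is why the return annotations added in 2.3.0 can
be precise.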