Bump requests-oauthlib from 1.3.0 to 1.3.1 (#1636)

* Bump requests-oauthlib from 1.3.0 to 1.3.1

Bumps [requests-oauthlib](https://github.com/requests/requests-oauthlib) from 1.3.0 to 1.3.1.
- [Release notes](https://github.com/requests/requests-oauthlib/releases)
- [Changelog](https://github.com/requests/requests-oauthlib/blob/master/HISTORY.rst)
- [Commits](https://github.com/requests/requests-oauthlib/compare/v1.3.0...v1.3.1)

---
updated-dependencies:
- dependency-name: requests-oauthlib
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update requests-oauthlib==1.3.1

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: JonnyWong16 <9099342+JonnyWong16@users.noreply.github.com>

[skip ci]
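
For reference, the pin change itself is a one-line bump; a minimal sketch, assuming the project pins the dependency in a requirements.txt (the exact file in this repository may differ):

-requests-oauthlib==1.3.0
+requests-oauthlib==1.3.1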

@@ -13,7 +13,7 @@ from .cd import (
     mb_encoding_languages,
     merge_coherence_ratios,
 )
-from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE
+from .constant import IANA_SUPPORTED, TOO_BIG_SEQUENCE, TOO_SMALL_SEQUENCE, TRACE
 from .md import mess_ratio
 from .models import CharsetMatch, CharsetMatches
 from .utils import (
@@ -25,6 +25,8 @@ from .utils import (
     should_strip_sig_or_bom,
 )

+# Will most likely be controversial
+# logging.addLevelName(TRACE, "TRACE")
 logger = logging.getLogger("charset_normalizer")
 explain_handler = logging.StreamHandler()
 explain_handler.setFormatter(
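
The hunk above references a TRACE constant imported from .constant: a custom logging level below DEBUG. A minimal sketch of how such a level works, assuming TRACE = 5 (the value here is an assumption, not taken from the diff):

import logging

TRACE = 5  # assumed value; charset_normalizer imports this from .constant

# Registering a display name is optional and mutates global logging state,
# which is presumably why the diff keeps addLevelName commented out as
# "controversial".
logging.addLevelName(TRACE, "TRACE")

logger = logging.getLogger("charset_normalizer")
logger.addHandler(logging.StreamHandler())
logger.setLevel(TRACE)

# logger.log accepts any integer level, so no Logger subclass is needed.
logger.log(TRACE, "probing %s", "utf_8")
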
@@ -70,19 +72,20 @@ def from_bytes(
     if explain:
         previous_logger_level = logger.level  # type: int
         logger.addHandler(explain_handler)
-        logger.setLevel(logging.DEBUG)
+        logger.setLevel(TRACE)

     length = len(sequences)  # type: int

     if length == 0:
-        logger.warning("Encoding detection on empty bytes, assuming utf_8 intention.")
+        logger.debug("Encoding detection on empty bytes, assuming utf_8 intention.")
         if explain:
             logger.removeHandler(explain_handler)
             logger.setLevel(previous_logger_level or logging.WARNING)
         return CharsetMatches([CharsetMatch(sequences, "utf_8", 0.0, False, [], "")])

     if cp_isolation is not None:
-        logger.debug(
+        logger.log(
+            TRACE,
             "cp_isolation is set. use this flag for debugging purpose. "
             "limited list of encoding allowed : %s.",
             ", ".join(cp_isolation),
@@ -92,7 +95,8 @@ def from_bytes(
         cp_isolation = []

     if cp_exclusion is not None:
-        logger.debug(
+        logger.log(
+            TRACE,
             "cp_exclusion is set. use this flag for debugging purpose. "
             "limited list of encoding excluded : %s.",
             ", ".join(cp_exclusion),
@@ -102,7 +106,8 @@ def from_bytes(
         cp_exclusion = []

     if length <= (chunk_size * steps):
-        logger.debug(
+        logger.log(
+            TRACE,
             "override steps (%i) and chunk_size (%i) as content does not fit (%i byte(s) given) parameters.",
             steps,
             chunk_size,
@@ -118,16 +123,18 @@ def from_bytes(
     is_too_large_sequence = len(sequences) >= TOO_BIG_SEQUENCE  # type: bool

     if is_too_small_sequence:
-        logger.warning(
+        logger.log(
+            TRACE,
             "Trying to detect encoding from a tiny portion of ({}) byte(s).".format(
                 length
-            )
+            ),
         )
     elif is_too_large_sequence:
-        logger.info(
+        logger.log(
+            TRACE,
             "Using lazy str decoding because the payload is quite large, ({}) byte(s).".format(
                 length
-            )
+            ),
         )

     prioritized_encodings = []  # type: List[str]
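
The is_too_small_sequence / is_too_large_sequence flags come from thresholds in .constant. A hedged sketch with assumed values (32 bytes and 10 MB, as in recent 2.0.x releases; check constant.py for the authoritative numbers):

# Assumed values mirroring charset_normalizer.constant in the 2.0.x line:
TOO_SMALL_SEQUENCE = 32       # below this, probing is statistically weak
TOO_BIG_SEQUENCE = int(10e6)  # at or above this, lazy str decoding kicks in

payload = "Hello".encode("utf_8")
is_too_small_sequence = len(payload) < TOO_SMALL_SEQUENCE   # True (5 bytes)
is_too_large_sequence = len(payload) >= TOO_BIG_SEQUENCE    # False
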
@@ -138,7 +145,8 @@ def from_bytes(

     if specified_encoding is not None:
         prioritized_encodings.append(specified_encoding)
-        logger.info(
+        logger.log(
+            TRACE,
             "Detected declarative mark in sequence. Priority +1 given for %s.",
             specified_encoding,
         )
@@ -157,7 +165,8 @@ def from_bytes(

     if sig_encoding is not None:
         prioritized_encodings.append(sig_encoding)
-        logger.info(
+        logger.log(
+            TRACE,
             "Detected a SIG or BOM mark on first %i byte(s). Priority +1 given for %s.",
             len(sig_payload),
             sig_encoding,
@@ -188,7 +197,8 @@ def from_bytes(
         )  # type: bool

         if encoding_iana in {"utf_16", "utf_32"} and not bom_or_sig_available:
-            logger.debug(
+            logger.log(
+                TRACE,
                 "Encoding %s wont be tested as-is because it require a BOM. Will try some sub-encoder LE/BE.",
                 encoding_iana,
             )
@@ -197,8 +207,10 @@ def from_bytes(

         try:
             is_multi_byte_decoder = is_multi_byte_encoding(encoding_iana)  # type: bool
         except (ModuleNotFoundError, ImportError):
-            logger.debug(
-                "Encoding %s does not provide an IncrementalDecoder", encoding_iana
+            logger.log(
+                TRACE,
+                "Encoding %s does not provide an IncrementalDecoder",
+                encoding_iana,
             )
             continue
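
The except clause above skips codecs that ship no incremental decoder, which the chunked probing relies on. A small illustration of the general mechanism via the standard codecs module (this shows the concept, not the library's is_multi_byte_encoding helper itself):

import codecs

# An incremental decoder consumes a byte stream chunk by chunk and
# buffers incomplete multi-byte sequences between calls.
decoder = codecs.lookup("utf_16").incrementaldecoder("strict")
print(decoder.decode(b"\xff\xfeh\x00e\x00"))           # 'he' (after the BOM)
print(decoder.decode(b"l\x00l\x00o\x00", final=True))  # 'llo'
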
@@ -219,7 +231,8 @@ def from_bytes(
                 )
             except (UnicodeDecodeError, LookupError) as e:
                 if not isinstance(e, LookupError):
-                    logger.debug(
+                    logger.log(
+                        TRACE,
                         "Code page %s does not fit given bytes sequence at ALL. %s",
                         encoding_iana,
                         str(e),
@@ -235,7 +248,8 @@ def from_bytes(
                     break

             if similar_soft_failure_test:
-                logger.debug(
+                logger.log(
+                    TRACE,
                     "%s is deemed too similar to code page %s and was consider unsuited already. Continuing!",
                     encoding_iana,
                     encoding_soft_failed,
@@ -255,7 +269,8 @@ def from_bytes(
         )  # type: bool

         if multi_byte_bonus:
-            logger.debug(
+            logger.log(
+                TRACE,
                 "Code page %s is a multi byte encoding table and it appear that at least one character "
                 "was encoded using n-bytes.",
                 encoding_iana,
@@ -285,7 +300,8 @@ def from_bytes(
                     errors="ignore" if is_multi_byte_decoder else "strict",
                 )  # type: str
             except UnicodeDecodeError as e:  # Lazy str loading may have missed something there
-                logger.debug(
+                logger.log(
+                    TRACE,
                     "LazyStr Loading: After MD chunk decode, code page %s does not fit given bytes sequence at ALL. %s",
                     encoding_iana,
                     str(e),
@@ -337,7 +353,8 @@ def from_bytes(
                 try:
                     sequences[int(50e3) :].decode(encoding_iana, errors="strict")
                 except UnicodeDecodeError as e:
-                    logger.debug(
+                    logger.log(
+                        TRACE,
                         "LazyStr Loading: After final lookup, code page %s does not fit given bytes sequence at ALL. %s",
                         encoding_iana,
                         str(e),
@@ -350,7 +367,8 @@ def from_bytes(
         )  # type: float
         if mean_mess_ratio >= threshold or early_stop_count >= max_chunk_gave_up:
             tested_but_soft_failure.append(encoding_iana)
-            logger.info(
+            logger.log(
+                TRACE,
                 "%s was excluded because of initial chaos probing. Gave up %i time(s). "
                 "Computed mean chaos is %f %%.",
                 encoding_iana,
@@ -373,7 +391,8 @@ def from_bytes(
                 fallback_u8 = fallback_entry
             continue

-        logger.info(
+        logger.log(
+            TRACE,
             "%s passed initial chaos probing. Mean measured chaos is %f %%",
             encoding_iana,
             round(mean_mess_ratio * 100, ndigits=3),
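
To make the "mean measured chaos" figure concrete: it is the average of per-chunk mess ratios, compared against the threshold parameter (0.2 by default in from_bytes). A worked toy example with made-up ratios:

md_ratios = [0.0, 0.04, 0.12]  # hypothetical per-chunk mess ratios
threshold = 0.2                # from_bytes default

mean_mess_ratio = sum(md_ratios) / len(md_ratios) if md_ratios else 0.0
print(round(mean_mess_ratio * 100, ndigits=3))  # 5.333 -> passes probing
print(mean_mess_ratio >= threshold)             # False, so not excluded
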
@@ -385,10 +404,11 @@ def from_bytes(
             target_languages = mb_encoding_languages(encoding_iana)

         if target_languages:
-            logger.debug(
+            logger.log(
+                TRACE,
                 "{} should target any language(s) of {}".format(
                     encoding_iana, str(target_languages)
-                )
+                ),
             )

         cd_ratios = []
@@ -406,10 +426,11 @@ def from_bytes(
         cd_ratios_merged = merge_coherence_ratios(cd_ratios)

         if cd_ratios_merged:
-            logger.info(
+            logger.log(
+                TRACE,
                 "We detected language {} using {}".format(
                     cd_ratios_merged, encoding_iana
-                )
+                ),
             )

         results.append(
@@ -427,8 +448,8 @@ def from_bytes(
             encoding_iana in [specified_encoding, "ascii", "utf_8"]
             and mean_mess_ratio < 0.1
         ):
-            logger.info(
-                "%s is most likely the one. Stopping the process.", encoding_iana
+            logger.debug(
+                "Encoding detection: %s is most likely the one.", encoding_iana
             )
             if explain:
                 logger.removeHandler(explain_handler)
@@ -436,8 +457,9 @@ def from_bytes(
             return CharsetMatches([results[encoding_iana]])

         if encoding_iana == sig_encoding:
-            logger.info(
-                "%s is most likely the one as we detected a BOM or SIG within the beginning of the sequence.",
+            logger.debug(
+                "Encoding detection: %s is most likely the one as we detected a BOM or SIG within "
+                "the beginning of the sequence.",
                 encoding_iana,
             )
             if explain:
@@ -447,13 +469,15 @@ def from_bytes(

     if len(results) == 0:
         if fallback_u8 or fallback_ascii or fallback_specified:
-            logger.debug(
-                "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback."
+            logger.log(
+                TRACE,
+                "Nothing got out of the detection process. Using ASCII/UTF-8/Specified fallback.",
             )

         if fallback_specified:
             logger.debug(
-                "%s will be used as a fallback match", fallback_specified.encoding
+                "Encoding detection: %s will be used as a fallback match",
+                fallback_specified.encoding,
             )
             results.append(fallback_specified)
         elif (
@@ -465,12 +489,21 @@ def from_bytes(
             )
             or (fallback_u8 is not None)
         ):
-            logger.warning("utf_8 will be used as a fallback match")
+            logger.debug("Encoding detection: utf_8 will be used as a fallback match")
             results.append(fallback_u8)
         elif fallback_ascii:
-            logger.warning("ascii will be used as a fallback match")
+            logger.debug("Encoding detection: ascii will be used as a fallback match")
             results.append(fallback_ascii)

+    if results:
+        logger.debug(
+            "Encoding detection: Found %s as plausible (best-candidate) for content. With %i alternatives.",
+            results.best().encoding,  # type: ignore
+            len(results) - 1,
+        )
+    else:
+        logger.debug("Encoding detection: Unable to determine any suitable charset.")
+
     if explain:
         logger.removeHandler(explain_handler)
         logger.setLevel(previous_logger_level)
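
Taken together, the rework means from_bytes stays quiet at default logger levels and only narrates when asked. A hedged usage sketch (assuming a charset-normalizer release that includes this change):

from charset_normalizer import from_bytes

payload = "Всеки човек има право на образование.".encode("cp1251")

# explain=True attaches the StreamHandler and lowers the logger to TRACE
# for the duration of the call, then restores the previous level.
results = from_bytes(payload, explain=True)

best = results.best()
if best is not None:
    print(best.encoding)  # e.g. 'cp1251' or a compatible code page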