Mirror of https://github.com/Tautulli/Tautulli.git (synced 2025-07-05 20:51:15 -07:00)
Bump beautifulsoup4 from 4.10.0 to 4.11.1 (#1717)
* Bump beautifulsoup4 from 4.10.0 to 4.11.1

Bumps [beautifulsoup4](https://www.crummy.com/software/BeautifulSoup/bs4/) from 4.10.0 to 4.11.1.

---
updated-dependencies:
- dependency-name: beautifulsoup4
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>

* Update beautifulsoup4==4.11.1

* Update soupsieve==2.3.2.post1

* Update requirements.txt

Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: JonnyWong16 <9099342+JonnyWong16@users.noreply.github.com>

[skip ci]
This commit is contained in:
parent: a1fe0b04d7
commit: 467ae352f5

28 changed files with 4846 additions and 2609 deletions
@@ -15,14 +15,13 @@ documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/
 """

 __author__ = "Leonard Richardson (leonardr@segfault.org)"
-__version__ = "4.10.0"
-__copyright__ = "Copyright (c) 2004-2021 Leonard Richardson"
+__version__ = "4.11.1"
+__copyright__ = "Copyright (c) 2004-2022 Leonard Richardson"
 # Use of this source code is governed by the MIT license.
 __license__ = "MIT"

 __all__ = ['BeautifulSoup']

 from collections import Counter
 import os
 import re

@@ -35,7 +34,11 @@ import warnings
 if sys.version_info.major < 3:
     raise ImportError('You are trying to use a Python 3-specific version of Beautiful Soup under Python 2. This will not work. The final version of Beautiful Soup to support Python 2 was 4.9.3.')

-from .builder import builder_registry, ParserRejectedMarkup
+from .builder import (
+    builder_registry,
+    ParserRejectedMarkup,
+    XMLParsedAsHTMLWarning,
+)
 from .dammit import UnicodeDammit
 from .element import (
     CData,

@@ -67,7 +70,7 @@ class MarkupResemblesLocatorWarning(UserWarning):
     on disk.
     """


 class BeautifulSoup(Tag):
     """A data structure representing a parsed HTML or XML document.

@@ -207,10 +210,10 @@ class BeautifulSoup(Tag):
             if old_name in kwargs:
                 warnings.warn(
                     'The "%s" argument to the BeautifulSoup constructor '
-                    'has been renamed to "%s."' % (old_name, new_name))
-                value = kwargs[old_name]
-                del kwargs[old_name]
-                return value
+                    'has been renamed to "%s."' % (old_name, new_name),
+                    DeprecationWarning
+                )
+                return kwargs.pop(old_name)
             return None

         parse_only = parse_only or deprecated_argument(

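The user-visible effect of this hunk: passing a renamed (BS3-era) constructor
argument now raises a proper DeprecationWarning instead of a generic
UserWarning. A minimal sketch, using one of the old argument spellings:

    import warnings
    from bs4 import BeautifulSoup

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        BeautifulSoup("<a>x</a>", "html.parser", parseOnlyThese=None)
    assert caught[0].category is DeprecationWarning
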
@@ -305,51 +308,18 @@ class BeautifulSoup(Tag):
         self._namespaces = dict()
         self.parse_only = parse_only

-        self.builder.initialize_soup(self)
-
         if hasattr(markup, 'read'):        # It's a file-type object.
             markup = markup.read()
         elif len(markup) <= 256 and (
                 (isinstance(markup, bytes) and not b'<' in markup)
                 or (isinstance(markup, str) and not '<' in markup)
         ):
-            # Print out warnings for a couple beginner problems
+            # Issue warnings for a couple beginner problems
             # involving passing non-markup to Beautiful Soup.
             # Beautiful Soup will still parse the input as markup,
-            # just in case that's what the user really wants.
-            if (isinstance(markup, str)
-                and not os.path.supports_unicode_filenames):
-                possible_filename = markup.encode("utf8")
-            else:
-                possible_filename = markup
-            is_file = False
-            is_directory = False
-            try:
-                is_file = os.path.exists(possible_filename)
-                if is_file:
-                    is_directory = os.path.isdir(possible_filename)
-            except Exception as e:
-                # This is almost certainly a problem involving
-                # characters not valid in filenames on this
-                # system. Just let it go.
-                pass
-            if is_directory:
-                warnings.warn(
-                    '"%s" looks like a directory name, not markup. You may'
-                    ' want to open a file found in this directory and pass'
-                    ' the filehandle into Beautiful Soup.' % (
-                        self._decode_markup(markup)
-                    ),
-                    MarkupResemblesLocatorWarning
-                )
-            elif is_file:
-                warnings.warn(
-                    '"%s" looks like a filename, not markup. You should'
-                    ' probably open this file and pass the filehandle into'
-                    ' Beautiful Soup.' % self._decode_markup(markup),
-                    MarkupResemblesLocatorWarning
-                )
-            self._check_markup_is_url(markup)
+            # since that is sometimes the intended behavior.
+            if not self._markup_is_url(markup):
+                self._markup_resembles_filename(markup)

         rejections = []
         success = False

@@ -358,6 +328,7 @@ class BeautifulSoup(Tag):
              self.builder.prepare_markup(
                  markup, from_encoding, exclude_encodings=exclude_encodings)):
             self.reset()
+            self.builder.initialize_soup(self)
             try:
                 self._feed()
                 success = True

@@ -393,10 +364,10 @@ class BeautifulSoup(Tag):
     def __getstate__(self):
         # Frequently a tree builder can't be pickled.
         d = dict(self.__dict__)
-        if 'builder' in d and not self.builder.picklable:
+        if 'builder' in d and d['builder'] is not None and not self.builder.picklable:
            d['builder'] = None
         return d

     @classmethod
     def _decode_markup(cls, markup):
         """Ensure `markup` is bytes so it's safe to send into warnings.warn.

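A short sketch of what the extra None check fixes, assuming the lxml builder
(which is not picklable): pickling a soup that had already been round-tripped
used to evaluate None.picklable and raise AttributeError.

    import pickle
    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<p>text</p>", "lxml")
    once = pickle.loads(pickle.dumps(soup))    # 'builder' is replaced with None
    twice = pickle.loads(pickle.dumps(once))   # works now; previously raised
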
@@ -411,11 +382,13 @@ class BeautifulSoup(Tag):
         return decoded

     @classmethod
-    def _check_markup_is_url(cls, markup):
+    def _markup_is_url(cls, markup):
         """Error-handling method to raise a warning if incoming markup looks
         like a URL.

         :param markup: A string.
+        :return: Whether or not the markup resembles a URL
+            closely enough to justify a warning.
         """
         if isinstance(markup, bytes):
             space = b' '

@@ -424,20 +397,50 @@ class BeautifulSoup(Tag):
             space = ' '
             cant_start_with = ("http:", "https:")
         else:
-            return
+            return False

         if any(markup.startswith(prefix) for prefix in cant_start_with):
             if not space in markup:
                 warnings.warn(
-                    '"%s" looks like a URL. Beautiful Soup is not an'
-                    ' HTTP client. You should probably use an HTTP client like'
-                    ' requests to get the document behind the URL, and feed'
-                    ' that document to Beautiful Soup.' % cls._decode_markup(
-                        markup
-                    ),
+                    'The input looks more like a URL than markup. You may want to use'
+                    ' an HTTP client like requests to get the document behind'
+                    ' the URL, and feed that document to Beautiful Soup.',
                     MarkupResemblesLocatorWarning
                 )
+                return True
+        return False
+
+    @classmethod
+    def _markup_resembles_filename(cls, markup):
+        """Error-handling method to raise a warning if incoming markup
+        resembles a filename.
+
+        :param markup: A bytestring or string.
+        :return: Whether or not the markup resembles a filename
+            closely enough to justify a warning.
+        """
+        path_characters = '/\\'
+        extensions = ['.html', '.htm', '.xml', '.xhtml', '.txt']
+        if isinstance(markup, bytes):
+            path_characters = path_characters.encode("utf8")
+            extensions = [x.encode('utf8') for x in extensions]
+        filelike = False
+        if any(x in markup for x in path_characters):
+            filelike = True
+        else:
+            lower = markup.lower()
+            if any(lower.endswith(ext) for ext in extensions):
+                filelike = True
+        if filelike:
+            warnings.warn(
+                'The input looks more like a filename than markup. You may'
+                ' want to open this file and pass the filehandle into'
+                ' Beautiful Soup.',
+                MarkupResemblesLocatorWarning
+            )
+            return True
+        return False

     def _feed(self):
         """Internal method that parses previously set markup, creating a large
         number of Tag and NavigableString objects.

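Both helpers feed the same user-facing warning class. A quick sketch of the
two triggers (a URL-shaped string and a path-shaped string):

    import warnings
    from bs4 import BeautifulSoup, MarkupResemblesLocatorWarning

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        BeautifulSoup("https://example.com/page.html", "html.parser")
        BeautifulSoup("reports/index.html", "html.parser")
    assert all(c.category is MarkupResemblesLocatorWarning for c in caught)
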
@@ -689,7 +692,7 @@ class BeautifulSoup(Tag):
         return most_recently_popped

     def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None,
-                        sourcepos=None):
+                        sourcepos=None, namespaces=None):
         """Called by the tree builder when a new tag is encountered.

         :param name: Name of the tag.

@@ -699,6 +702,8 @@ class BeautifulSoup(Tag):
             source document.
         :param sourcepos: The character position within `sourceline` where this
             tag was found.
+        :param namespaces: A dictionary of all namespace prefix mappings
+            currently in scope in the document.

         If this method returns None, the tag was rejected by an active
         SoupStrainer. You should proceed as if the tag had not occurred

@@ -716,7 +721,8 @@ class BeautifulSoup(Tag):
         tag = self.element_classes.get(Tag, Tag)(
             self, self.builder, name, namespace, nsprefix, attrs,
             self.currentTag, self._most_recent_element,
-            sourceline=sourceline, sourcepos=sourcepos
+            sourceline=sourceline, sourcepos=sourcepos,
+            namespaces=namespaces
         )
         if tag is None:
             return tag

@@ -735,7 +741,7 @@ class BeautifulSoup(Tag):
         #print("End tag: " + name)
         self.endData()
         self._popToTag(name, nsprefix)

     def handle_data(self, data):
         """Called by the tree builder when a chunk of textual data is encountered."""
         self.current_data.append(data)

@@ -782,7 +788,9 @@ class BeautifulStoneSoup(BeautifulSoup):
         kwargs['features'] = 'xml'
         warnings.warn(
             'The BeautifulStoneSoup class is deprecated. Instead of using '
-            'it, pass features="xml" into the BeautifulSoup constructor.')
+            'it, pass features="xml" into the BeautifulSoup constructor.',
+            DeprecationWarning
+        )
         super(BeautifulStoneSoup, self).__init__(*args, **kwargs)

@@ -3,10 +3,14 @@ __license__ = "MIT"

 from collections import defaultdict
 import itertools
+import re
+import warnings
 import sys
 from bs4.element import (
     CharsetMetaAttributeValue,
     ContentMetaAttributeValue,
+    RubyParenthesisString,
+    RubyTextString,
     Stylesheet,
     Script,
     TemplateString,

@@ -28,6 +32,12 @@ XML = 'xml'
 HTML = 'html'
 HTML_5 = 'html5'

+class XMLParsedAsHTMLWarning(UserWarning):
+    """The warning issued when an HTML parser is used to parse
+    XML that is not XHTML.
+    """
+    MESSAGE = """It looks like you're parsing an XML document using an HTML parser. If this really is an HTML document (maybe it's XHTML?), you can ignore or filter this warning. If it's XML, you should know that using an XML parser will be more reliable. To parse this document as XML, make sure you have the lxml package installed, and pass the keyword argument `features="xml"` into the BeautifulSoup constructor."""
+

 class TreeBuilderRegistry(object):
     """A way of looking up TreeBuilder subclasses by their name or by desired

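The new warning class behaves like any other Python warning, so callers who
really do want to parse XML-ish input with an HTML parser can filter it. A
small sketch using the names defined in this hunk:

    import warnings
    from bs4 import BeautifulSoup
    from bs4.builder import XMLParsedAsHTMLWarning

    with warnings.catch_warnings():
        warnings.simplefilter("ignore", XMLParsedAsHTMLWarning)
        BeautifulSoup('<?xml version="1.0"?><root/>', "html.parser")
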
@@ -319,7 +329,7 @@ class TreeBuilder(object):
                 values = value
             attrs[attr] = values
         return attrs

 class SAXTreeBuilder(TreeBuilder):
     """A Beautiful Soup treebuilder that listens for SAX events.

@@ -390,17 +400,25 @@ class HTMLTreeBuilder(TreeBuilder):
     # you need to use it.
     block_elements = set(["address", "article", "aside", "blockquote", "canvas", "dd", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "li", "main", "nav", "noscript", "ol", "output", "p", "pre", "section", "table", "tfoot", "ul", "video"])

-    # The HTML standard defines an unusual content model for these tags.
-    # We represent this by using a string class other than NavigableString
-    # inside these tags.
+    # These HTML tags need special treatment so they can be
+    # represented by a string class other than NavigableString.
     #
-    # I made this list by going through the HTML spec
+    # For some of these tags, it's because the HTML standard defines
+    # an unusual content model for them. I made this list by going
+    # through the HTML spec
     # (https://html.spec.whatwg.org/#metadata-content) and looking for
     # "metadata content" elements that can contain strings.
     #
+    # The Ruby tags (<rt> and <rp>) are here despite being normal
+    # "phrasing content" tags, because the content they contain is
+    # qualitatively different from other text in the document, and it
+    # can be useful to be able to distinguish it.
+    #
     # TODO: Arguably <noscript> could go here but it seems
     # qualitatively different from the other tags.
     DEFAULT_STRING_CONTAINERS = {
+        'rt' : RubyTextString,
+        'rp' : RubyParenthesisString,
         'style': Stylesheet,
         'script': Script,
         'template': TemplateString,

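The practical effect: strings inside <rt> and <rp> get their own
NavigableString subclasses, so ruby annotations can be told apart from the
text they annotate. A minimal sketch:

    from bs4 import BeautifulSoup
    from bs4.element import RubyParenthesisString, RubyTextString

    soup = BeautifulSoup(
        "<ruby>漢 <rp>(</rp><rt>kan</rt><rp>)</rp></ruby>", "html.parser"
    )
    assert type(soup.rt.string) is RubyTextString
    assert type(soup.rp.string) is RubyParenthesisString
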
@@ -431,7 +449,7 @@ class HTMLTreeBuilder(TreeBuilder):
     }

     DEFAULT_PRESERVE_WHITESPACE_TAGS = set(['pre', 'textarea'])

     def set_up_substitutions(self, tag):
         """Replace the declared encoding in a <meta> tag with a placeholder,
         to be substituted when the tag is output to a string.

@@ -475,6 +493,99 @@ class HTMLTreeBuilder(TreeBuilder):

         return (meta_encoding is not None)

+class DetectsXMLParsedAsHTML(object):
+    """A mixin class for any class (a TreeBuilder, or some class used by a
+    TreeBuilder) that's in a position to detect whether an XML
+    document is being incorrectly parsed as HTML, and issue an
+    appropriate warning.
+
+    This requires being able to observe an incoming processing
+    instruction that might be an XML declaration, and also able to
+    observe tags as they're opened. If you can't do that for a given
+    TreeBuilder, there's a less reliable implementation based on
+    examining the raw markup.
+    """
+
+    # Regular expression for seeing if markup has an <html> tag.
+    LOOKS_LIKE_HTML = re.compile("<[^ +]html", re.I)
+    LOOKS_LIKE_HTML_B = re.compile(b"<[^ +]html", re.I)
+
+    XML_PREFIX = '<?xml'
+    XML_PREFIX_B = b'<?xml'
+
+    @classmethod
+    def warn_if_markup_looks_like_xml(cls, markup):
+        """Perform a check on some markup to see if it looks like XML
+        that's not XHTML. If so, issue a warning.
+
+        This is much less reliable than doing the check while parsing,
+        but some of the tree builders can't do that.
+
+        :return: True if the markup looks like non-XHTML XML, False
+            otherwise.
+        """
+        if isinstance(markup, bytes):
+            prefix = cls.XML_PREFIX_B
+            looks_like_html = cls.LOOKS_LIKE_HTML_B
+        else:
+            prefix = cls.XML_PREFIX
+            looks_like_html = cls.LOOKS_LIKE_HTML
+
+        if (markup is not None
+            and markup.startswith(prefix)
+            and not looks_like_html.search(markup[:500])
+        ):
+            cls._warn()
+            return True
+        return False
+
+    @classmethod
+    def _warn(cls):
+        """Issue a warning about XML being parsed as HTML."""
+        warnings.warn(
+            XMLParsedAsHTMLWarning.MESSAGE, XMLParsedAsHTMLWarning
+        )
+
+    def _initialize_xml_detector(self):
+        """Call this method before parsing a document."""
+        self._first_processing_instruction = None
+        self._root_tag = None
+
+    def _document_might_be_xml(self, processing_instruction):
+        """Call this method when encountering an XML declaration, or a
+        "processing instruction" that might be an XML declaration.
+        """
+        if (self._first_processing_instruction is not None
+            or self._root_tag is not None):
+            # The document has already started. Don't bother checking
+            # anymore.
+            return
+
+        self._first_processing_instruction = processing_instruction
+
+        # We won't know until we encounter the first tag whether or
+        # not this is actually a problem.
+
+    def _root_tag_encountered(self, name):
+        """Call this when you encounter the document's root tag.
+
+        This is where we actually check whether an XML document is
+        being incorrectly parsed as HTML, and issue the warning.
+        """
+        if self._root_tag is not None:
+            # This method was incorrectly called multiple times. Do
+            # nothing.
+            return
+
+        self._root_tag = name
+        if (name != 'html' and self._first_processing_instruction is not None
+            and self._first_processing_instruction.lower().startswith('xml ')):
+            # We encountered an XML declaration and then a tag other
+            # than 'html'. This is a reliable indicator that a
+            # non-XHTML document is being parsed as XML.
+            self._warn()
+

 def register_treebuilders_from(module):
     """Copy TreeBuilders from the given module into this module."""
     this_module = sys.modules[__name__]

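For tree builders that can't observe parse events, the markup-based fallback
can be invoked directly. A minimal sketch of the classmethod defined above:

    from bs4.builder import DetectsXMLParsedAsHTML

    # An XML declaration with no sign of an <html> tag in the first 500
    # characters: issues XMLParsedAsHTMLWarning and returns True.
    DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
        '<?xml version="1.0"?><root/>'
    )
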
@@ -8,6 +8,7 @@ __all__ = [
 import warnings
 import re
 from bs4.builder import (
+    DetectsXMLParsedAsHTML,
     PERMISSIVE,
     HTML,
     HTML_5,

@@ -70,6 +71,11 @@ class HTML5TreeBuilder(HTMLTreeBuilder):
         # UnicodeDammit.
         if exclude_encodings:
             warnings.warn("You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.")
+
+        # html5lib only parses HTML, so if it's given XML that's worth
+        # noting.
+        DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(markup)
+
         yield (markup, None, None, False)

         # These methods are defined by Beautiful Soup.

@@ -242,8 +248,8 @@ class AttrList(object):
     def __setitem__(self, name, value):
         # If this attribute is a multi-valued attribute for this element,
         # turn its value into a list.
-        list_attr = self.element.cdata_list_attributes
-        if (name in list_attr['*']
+        list_attr = self.element.cdata_list_attributes or {}
+        if (name in list_attr.get('*', [])
             or (self.element.name in list_attr
                 and name in list_attr[self.element.name])):
            # A node that is being cloned may have already undergone

@@ -44,6 +44,7 @@ from bs4.element import (
 from bs4.dammit import EntitySubstitution, UnicodeDammit

 from bs4.builder import (
+    DetectsXMLParsedAsHTML,
     HTML,
     HTMLTreeBuilder,
     STRICT,

@@ -52,7 +53,7 @@ from bs4.builder import (

 HTMLPARSER = 'html.parser'

-class BeautifulSoupHTMLParser(HTMLParser):
+class BeautifulSoupHTMLParser(HTMLParser, DetectsXMLParsedAsHTML):
     """A subclass of the Python standard library's HTMLParser class, which
     listens for HTMLParser events and translates them into calls
     to Beautiful Soup's tree construction API.

@@ -88,6 +89,8 @@ class BeautifulSoupHTMLParser(HTMLParser):
         # will ignore, assuming they ever show up.
         self.already_closed_empty_element = []

+        self._initialize_xml_detector()
+
     def error(self, msg):
         """In Python 3, HTMLParser subclasses must implement error(), although
         this requirement doesn't appear to be documented.

@@ -167,6 +170,9 @@ class BeautifulSoupHTMLParser(HTMLParser):
             # But we might encounter an explicit closing tag for this tag
             # later on. If so, we want to ignore it.
             self.already_closed_empty_element.append(name)
+
+        if self._root_tag is None:
+            self._root_tag_encountered(name)

     def handle_endtag(self, name, check_already_closed=True):
         """Handle a closing tag, e.g. '</tag>'

@@ -185,7 +191,7 @@ class BeautifulSoupHTMLParser(HTMLParser):
                 self.already_closed_empty_element.remove(name)
         else:
             self.soup.handle_endtag(name)

     def handle_data(self, data):
         """Handle some textual data that shows up between tags."""
         self.soup.handle_data(data)

@@ -288,6 +294,7 @@ class BeautifulSoupHTMLParser(HTMLParser):
         """
         self.soup.endData()
         self.soup.handle_data(data)
+        self._document_might_be_xml(data)
         self.soup.endData(ProcessingInstruction)

@@ -22,6 +22,7 @@ from bs4.element import (
     XMLProcessingInstruction,
 )
 from bs4.builder import (
+    DetectsXMLParsedAsHTML,
     FAST,
     HTML,
     HTMLTreeBuilder,

@@ -79,15 +80,24 @@ class LXMLTreeBuilderForXML(TreeBuilder):

         This might be useful later on when creating CSS selectors.

+        This will track (almost) all namespaces, even ones that were
+        only in scope for part of the document. If two namespaces have
+        the same prefix, only the first one encountered will be
+        tracked. Un-prefixed namespaces are not tracked.
+
         :param mapping: A dictionary mapping namespace prefixes to URIs.
         """
         for key, value in list(mapping.items()):
+            # This is 'if key' and not 'if key is not None' because we
+            # don't track un-prefixed namespaces. Soupselect will
+            # treat an un-prefixed namespace as the default, which
+            # causes confusion in some cases.
             if key and key not in self.soup._namespaces:
                 # Let the BeautifulSoup object know about a new namespace.
                 # If there are multiple namespaces defined with the same
                 # prefix, the first one in the document takes precedence.
                 self.soup._namespaces[key] = value

     def default_parser(self, encoding):
         """Find the default parser for the given encoding.

@@ -125,6 +135,7 @@ class LXMLTreeBuilderForXML(TreeBuilder):
         self.empty_element_tags = set(empty_element_tags)
         self.soup = None
         self.nsmaps = [self.DEFAULT_NSMAPS_INVERTED]
+        self.active_namespace_prefixes = [dict(self.DEFAULT_NSMAPS)]
         super(LXMLTreeBuilderForXML, self).__init__(**kwargs)

     def _getNsTag(self, tag):

@@ -166,12 +177,21 @@ class LXMLTreeBuilderForXML(TreeBuilder):
         is_html = not self.is_xml
         if is_html:
             self.processing_instruction_class = ProcessingInstruction
+            # We're in HTML mode, so if we're given XML, that's worth
+            # noting.
+            DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(markup)
         else:
             self.processing_instruction_class = XMLProcessingInstruction

         if isinstance(markup, str):
             # We were given Unicode. Maybe lxml can parse Unicode on
             # this system?
+
+            # TODO: This is a workaround for
+            # https://bugs.launchpad.net/lxml/+bug/1948551.
+            # We can remove it once the upstream issue is fixed.
+            if len(markup) > 0 and markup[0] == u'\N{BYTE ORDER MARK}':
+                markup = markup[1:]
             yield markup, None, document_declared_encoding, False

         if isinstance(markup, str):

@@ -240,6 +260,20 @@ class LXMLTreeBuilderForXML(TreeBuilder):
             # mappings.
             self.nsmaps.append(_invert(nsmap))
+
+            # The currently active namespace prefixes have
+            # changed. Calculate the new mapping so it can be stored
+            # with all Tag objects created while these prefixes are in
+            # scope.
+            current_mapping = dict(self.active_namespace_prefixes[-1])
+            current_mapping.update(nsmap)
+
+            # We should not track un-prefixed namespaces as we can only hold one
+            # and it will be recognized as the default namespace by soupsieve,
+            # which may be confusing in some situations.
+            if '' in current_mapping:
+                del current_mapping['']
+            self.active_namespace_prefixes.append(current_mapping)

         # Also treat the namespace mapping as a set of attributes on the
         # tag, so we can recreate it later.
         attrs = attrs.copy()

@@ -264,8 +298,11 @@ class LXMLTreeBuilderForXML(TreeBuilder):

         namespace, name = self._getNsTag(name)
         nsprefix = self._prefix_for_namespace(namespace)
-        self.soup.handle_starttag(name, namespace, nsprefix, attrs)
+        self.soup.handle_starttag(
+            name, namespace, nsprefix, attrs,
+            namespaces=self.active_namespace_prefixes[-1]
+        )

     def _prefix_for_namespace(self, namespace):
         """Find the currently active prefix for the given namespace."""
         if namespace is None:

@@ -289,13 +326,20 @@ class LXMLTreeBuilderForXML(TreeBuilder):
         if len(self.nsmaps) > 1:
             # This tag, or one of its parents, introduced a namespace
             # mapping, so pop it off the stack.
-            self.nsmaps.pop()
+            out_of_scope_nsmap = self.nsmaps.pop()
+
+            if out_of_scope_nsmap is not None:
+                # This tag introduced a namespace mapping which is no
+                # longer in scope. Recalculate the currently active
+                # namespace prefixes.
+                self.active_namespace_prefixes.pop()

     def pi(self, target, data):
         self.soup.endData()
-        self.soup.handle_data(target + ' ' + data)
+        data = target + ' ' + data
+        self.soup.handle_data(data)
         self.soup.endData(self.processing_instruction_class)

     def data(self, content):
         self.soup.handle_data(content)

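One payoff of tracking active prefixes: Tag objects created during the parse
remember the namespaces in scope, so CSS selectors can use the document's own
prefixes. A sketch, assuming lxml and soupsieve are installed and that 4.11's
select() falls back to the namespaces a tag remembers:

    from bs4 import BeautifulSoup

    doc = '<root xmlns:ns1="http://example.com/ns1"><ns1:child>x</ns1:child></root>'
    soup = BeautifulSoup(doc, "xml")
    print(soup.select("ns1|child"))  # prefix resolved from the document itself
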
lib/bs4/dammit.py: 2297 lines changed; file diff suppressed because it is too large.

@@ -4,7 +4,7 @@
 __license__ = "MIT"

 import cProfile
-from io import StringIO
+from io import BytesIO
 from html.parser import HTMLParser
 import bs4
 from bs4 import BeautifulSoup, __version__

@@ -103,7 +103,13 @@ def lxml_trace(data, html=True, **kwargs):
        if False, lxml's XML parser will be used.
     """
     from lxml import etree
-    for event, element in etree.iterparse(StringIO(data), html=html, **kwargs):
+    recover = kwargs.pop('recover', True)
+    if isinstance(data, str):
+        data = data.encode("utf8")
+    reader = BytesIO(data)
+    for event, element in etree.iterparse(
+        reader, html=html, recover=recover, **kwargs
+    ):
         print(("%s, %4s, %s" % (event, element.tag, element.text)))

 class AnnouncingParser(HTMLParser):

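The diagnostic helper now accepts either str or bytes and enables lxml's
recover mode by default. A quick usage sketch (needs lxml installed):

    from bs4.diagnose import lxml_trace

    lxml_trace("<p>One<p>Two", html=True)
    # prints one "event, tag, text" line per lxml parse event
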
@@ -23,7 +23,6 @@ from bs4.formatter import (
 )

 DEFAULT_OUTPUT_ENCODING = "utf-8"
-PY3K = (sys.version_info[0] > 2)

 nonwhitespace_re = re.compile(r"\S+")

@@ -555,7 +554,7 @@ class PageElement(object):
                 parent.insert(index+1+offset, successor)
             offset += 1

-    def find_next(self, name=None, attrs={}, text=None, **kwargs):
+    def find_next(self, name=None, attrs={}, string=None, **kwargs):
         """Find the first PageElement that matches the given criteria and
         appears later in the document than this PageElement.

@@ -564,15 +563,15 @@ class PageElement(object):

         :param name: A filter on tag name.
         :param attrs: A dictionary of filters on attribute values.
-        :param text: A filter for a NavigableString with specific text.
+        :param string: A filter for a NavigableString with specific text.
         :kwargs: A dictionary of filters on attribute values.
         :return: A PageElement.
         :rtype: bs4.element.Tag | bs4.element.NavigableString
         """
-        return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
+        return self._find_one(self.find_all_next, name, attrs, string, **kwargs)
     findNext = find_next  # BS3

-    def find_all_next(self, name=None, attrs={}, text=None, limit=None,
+    def find_all_next(self, name=None, attrs={}, string=None, limit=None,
                       **kwargs):
         """Find all PageElements that match the given criteria and appear
         later in the document than this PageElement.

@@ -582,16 +581,16 @@ class PageElement(object):

         :param name: A filter on tag name.
         :param attrs: A dictionary of filters on attribute values.
-        :param text: A filter for a NavigableString with specific text.
+        :param string: A filter for a NavigableString with specific text.
         :param limit: Stop looking after finding this many results.
         :kwargs: A dictionary of filters on attribute values.
         :return: A ResultSet containing PageElements.
         """
-        return self._find_all(name, attrs, text, limit, self.next_elements,
+        return self._find_all(name, attrs, string, limit, self.next_elements,
                               **kwargs)
     findAllNext = find_all_next  # BS3

-    def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
+    def find_next_sibling(self, name=None, attrs={}, string=None, **kwargs):
         """Find the closest sibling to this PageElement that matches the
         given criteria and appears later in the document.

@@ -600,16 +599,16 @@ class PageElement(object):

         :param name: A filter on tag name.
         :param attrs: A dictionary of filters on attribute values.
-        :param text: A filter for a NavigableString with specific text.
+        :param string: A filter for a NavigableString with specific text.
         :kwargs: A dictionary of filters on attribute values.
         :return: A PageElement.
         :rtype: bs4.element.Tag | bs4.element.NavigableString
         """
-        return self._find_one(self.find_next_siblings, name, attrs, text,
+        return self._find_one(self.find_next_siblings, name, attrs, string,
                               **kwargs)
     findNextSibling = find_next_sibling  # BS3

-    def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
+    def find_next_siblings(self, name=None, attrs={}, string=None, limit=None,
                            **kwargs):
         """Find all siblings of this PageElement that match the given criteria
         and appear later in the document.

@@ -619,18 +618,18 @@ class PageElement(object):

         :param name: A filter on tag name.
         :param attrs: A dictionary of filters on attribute values.
-        :param text: A filter for a NavigableString with specific text.
+        :param string: A filter for a NavigableString with specific text.
         :param limit: Stop looking after finding this many results.
         :kwargs: A dictionary of filters on attribute values.
         :return: A ResultSet of PageElements.
         :rtype: bs4.element.ResultSet
         """
-        return self._find_all(name, attrs, text, limit,
+        return self._find_all(name, attrs, string, limit,
                               self.next_siblings, **kwargs)
     findNextSiblings = find_next_siblings   # BS3
     fetchNextSiblings = find_next_siblings  # BS2

-    def find_previous(self, name=None, attrs={}, text=None, **kwargs):
+    def find_previous(self, name=None, attrs={}, string=None, **kwargs):
         """Look backwards in the document from this PageElement and find the
         first PageElement that matches the given criteria.

@@ -639,16 +638,16 @@ class PageElement(object):

         :param name: A filter on tag name.
         :param attrs: A dictionary of filters on attribute values.
-        :param text: A filter for a NavigableString with specific text.
+        :param string: A filter for a NavigableString with specific text.
         :kwargs: A dictionary of filters on attribute values.
         :return: A PageElement.
         :rtype: bs4.element.Tag | bs4.element.NavigableString
         """
         return self._find_one(
-            self.find_all_previous, name, attrs, text, **kwargs)
+            self.find_all_previous, name, attrs, string, **kwargs)
     findPrevious = find_previous  # BS3

-    def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
+    def find_all_previous(self, name=None, attrs={}, string=None, limit=None,
                           **kwargs):
         """Look backwards in the document from this PageElement and find all
         PageElements that match the given criteria.

@@ -658,18 +657,18 @@ class PageElement(object):

         :param name: A filter on tag name.
         :param attrs: A dictionary of filters on attribute values.
-        :param text: A filter for a NavigableString with specific text.
+        :param string: A filter for a NavigableString with specific text.
         :param limit: Stop looking after finding this many results.
         :kwargs: A dictionary of filters on attribute values.
         :return: A ResultSet of PageElements.
         :rtype: bs4.element.ResultSet
         """
-        return self._find_all(name, attrs, text, limit, self.previous_elements,
+        return self._find_all(name, attrs, string, limit, self.previous_elements,
                               **kwargs)
     findAllPrevious = find_all_previous  # BS3
     fetchPrevious = find_all_previous    # BS2

-    def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
+    def find_previous_sibling(self, name=None, attrs={}, string=None, **kwargs):
         """Returns the closest sibling to this PageElement that matches the
         given criteria and appears earlier in the document.

@@ -678,16 +677,16 @@ class PageElement(object):

         :param name: A filter on tag name.
         :param attrs: A dictionary of filters on attribute values.
-        :param text: A filter for a NavigableString with specific text.
+        :param string: A filter for a NavigableString with specific text.
         :kwargs: A dictionary of filters on attribute values.
         :return: A PageElement.
         :rtype: bs4.element.Tag | bs4.element.NavigableString
         """
-        return self._find_one(self.find_previous_siblings, name, attrs, text,
+        return self._find_one(self.find_previous_siblings, name, attrs, string,
                               **kwargs)
     findPreviousSibling = find_previous_sibling  # BS3

-    def find_previous_siblings(self, name=None, attrs={}, text=None,
+    def find_previous_siblings(self, name=None, attrs={}, string=None,
                                limit=None, **kwargs):
         """Returns all siblings to this PageElement that match the
         given criteria and appear earlier in the document.

@@ -697,13 +696,13 @@ class PageElement(object):

         :param name: A filter on tag name.
         :param attrs: A dictionary of filters on attribute values.
-        :param text: A filter for a NavigableString with specific text.
+        :param string: A filter for a NavigableString with specific text.
         :param limit: Stop looking after finding this many results.
         :kwargs: A dictionary of filters on attribute values.
         :return: A ResultSet of PageElements.
         :rtype: bs4.element.ResultSet
         """
-        return self._find_all(name, attrs, text, limit,
+        return self._find_all(name, attrs, string, limit,
                               self.previous_siblings, **kwargs)
     findPreviousSiblings = find_previous_siblings   # BS3
     fetchPreviousSiblings = find_previous_siblings  # BS2

@@ -770,26 +769,29 @@ class PageElement(object):

     #These methods do the real heavy lifting.

-    def _find_one(self, method, name, attrs, text, **kwargs):
+    def _find_one(self, method, name, attrs, string, **kwargs):
         r = None
-        l = method(name, attrs, text, 1, **kwargs)
+        l = method(name, attrs, string, 1, **kwargs)
         if l:
             r = l[0]
         return r

-    def _find_all(self, name, attrs, text, limit, generator, **kwargs):
+    def _find_all(self, name, attrs, string, limit, generator, **kwargs):
         "Iterates over a generator looking for things that match."

-        if text is None and 'string' in kwargs:
-            text = kwargs['string']
-            del kwargs['string']
+        if string is None and 'text' in kwargs:
+            string = kwargs.pop('text')
+            warnings.warn(
+                "The 'text' argument to find()-type methods is deprecated. Use 'string' instead.",
+                DeprecationWarning
+            )

         if isinstance(name, SoupStrainer):
             strainer = name
         else:
-            strainer = SoupStrainer(name, attrs, text, **kwargs)
+            strainer = SoupStrainer(name, attrs, string, **kwargs)

-        if text is None and not limit and not attrs and not kwargs:
+        if string is None and not limit and not attrs and not kwargs:
             if name is True or name is None:
                 # Optimization to find all tags.
                 result = (element for element in generator

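The net effect for users: 'string' is now the canonical argument name for all
find()-type methods, while 'text' keeps working but warns. A short sketch:

    import warnings
    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<p>one</p><p>two</p>", "html.parser")
    soup.find_all(string="two")      # preferred spelling from 4.11 on

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        soup.find_all(text="two")    # still works, but warns
    assert caught[0].category is DeprecationWarning
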
@@ -1013,6 +1015,11 @@ class NavigableString(str, PageElement):

         # Do nothing if the caller is looking for specific types of
         # string, and we're of a different type.
+        #
+        # We check specific types instead of using isinstance(self,
+        # types) because all of these classes subclass
+        # NavigableString. Anyone who's using this feature probably
+        # wants generic NavigableStrings but not other stuff.
         my_type = type(self)
         if types is not None:
             if isinstance(types, type):

@@ -1141,6 +1148,27 @@ class TemplateString(NavigableString):
     pass


+class RubyTextString(NavigableString):
+    """A NavigableString representing the contents of the <rt> HTML
+    element.
+
+    https://dev.w3.org/html5/spec-LC/text-level-semantics.html#the-rt-element
+
+    Can be used to distinguish such strings from the strings they're
+    annotating.
+    """
+    pass
+
+
+class RubyParenthesisString(NavigableString):
+    """A NavigableString representing the contents of the <rp> HTML
+    element.
+
+    https://dev.w3.org/html5/spec-LC/text-level-semantics.html#the-rp-element
+    """
+    pass
+
+
 class Tag(PageElement):
     """Represents an HTML or XML tag that is part of a parse tree, along
     with its attributes and contents.

@@ -1155,6 +1183,7 @@ class Tag(PageElement):
                  can_be_empty_element=None, cdata_list_attributes=None,
                  preserve_whitespace_tags=None,
                  interesting_string_types=None,
+                 namespaces=None
                  ):
         """Basic constructor.

@@ -1187,6 +1216,9 @@ class Tag(PageElement):
             to be considered. The default is to consider
             NavigableString and CData the only interesting string
             subtypes.
+        :param namespaces: A dictionary mapping currently active
+            namespace prefixes to URIs. This can be used later to
+            construct CSS selectors.
         """
         if parser is None:
             self.parser_class = None

@@ -1198,6 +1230,7 @@ class Tag(PageElement):
             raise ValueError("No value provided for new tag's name.")
         self.name = name
         self.namespace = namespace
+        self._namespaces = namespaces or {}
         self.prefix = prefix
         if ((not builder or builder.store_line_numbers)
             and (sourceline is not None or sourcepos is not None)):

@@ -1280,7 +1313,7 @@ class Tag(PageElement):
         for child in self.contents:
             clone.append(child.__copy__())
         return clone

     @property
     def is_empty_element(self):
         """Is this tag an empty-element tag? (aka a self-closing tag)

@@ -1524,7 +1557,8 @@ class Tag(PageElement):
             warnings.warn(
                 '.%(name)sTag is deprecated, use .find("%(name)s") instead. If you really were looking for a tag called %(name)sTag, use .find("%(name)sTag")' % dict(
                     name=tag_name
-                )
+                ),
+                DeprecationWarning
             )
             return self.find(tag_name)
         # We special case contents to avoid recursion.

@@ -1558,36 +1592,19 @@ class Tag(PageElement):
     def __repr__(self, encoding="unicode-escape"):
         """Renders this PageElement as a string.

         :param encoding: The encoding to use (Python 2 only).
-        :return: Under Python 2, a bytestring; under Python 3,
-            a Unicode string.
+            TODO: This is now ignored and a warning should be issued
+            if a value is provided.
+        :return: A (Unicode) string.
         """
-        if PY3K:
-            # "The return value must be a string object", i.e. Unicode
-            return self.decode()
-        else:
-            # "The return value must be a string object", i.e. a bytestring.
-            # By convention, the return value of __repr__ should also be
-            # an ASCII string.
-            return self.encode(encoding)
+        # "The return value must be a string object", i.e. Unicode
+        return self.decode()

     def __unicode__(self):
         """Renders this PageElement as a Unicode string."""
         return self.decode()

-    def __str__(self):
-        """Renders this PageElement as a generic string.
-
-        :return: Under Python 2, a UTF-8 bytestring; under Python 3,
-            a Unicode string.
-        """
-        if PY3K:
-            return self.decode()
-        else:
-            return self.encode()
-
-    if PY3K:
-        __str__ = __repr__ = __unicode__
+    __str__ = __repr__ = __unicode__

     def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
                indent_level=None, formatter="minimal",

@@ -1597,8 +1614,10 @@ class Tag(PageElement):

         :param encoding: The destination encoding.
         :param indent_level: Each line of the rendering will be
-            indented this many spaces. Used internally in
-            recursive calls while pretty-printing.
+            indented this many levels. (The formatter decides what a
+            'level' means in terms of spaces or other characters
+            output.) Used internally in recursive calls while
+            pretty-printing.
         :param formatter: A Formatter object, or a string naming one of
             the standard formatters.
         :param errors: An error handling strategy such as

@@ -1674,7 +1693,7 @@ class Tag(PageElement):
         space = ''
         indent_space = ''
         if indent_level is not None:
-            indent_space = (' ' * (indent_level - 1))
+            indent_space = (formatter.indent * (indent_level - 1))
         if pretty_print:
             space = indent_space
             indent_contents = indent_level + 1

@@ -1749,8 +1768,10 @@ class Tag(PageElement):
         """Renders the contents of this tag as a Unicode string.

         :param indent_level: Each line of the rendering will be
-            indented this many spaces. Used internally in
-            recursive calls while pretty-printing.
+            indented this many levels. (The formatter decides what a
+            'level' means in terms of spaces or other characters
+            output.) Used internally in recursive calls while
+            pretty-printing.

         :param eventual_encoding: The tag is destined to be
             encoded into this encoding. decode_contents() is _not_

@@ -1761,6 +1782,7 @@ class Tag(PageElement):

         :param formatter: A Formatter object, or a string naming one of
             the standard Formatters.
+
         """
         # First off, turn a string formatter into a Formatter object. This
         # will stop the lookup from happening over and over again.

@@ -1783,7 +1805,7 @@ class Tag(PageElement):
                 text = text.strip()
             if text:
                 if pretty_print and not preserve_whitespace:
-                    s.append(" " * (indent_level - 1))
+                    s.append(formatter.indent * (indent_level - 1))
                 s.append(text)
                 if pretty_print and not preserve_whitespace:
                     s.append("\n")

@@ -1795,8 +1817,10 @@ class Tag(PageElement):
         """Renders the contents of this PageElement as a bytestring.

         :param indent_level: Each line of the rendering will be
-            indented this many spaces. Used internally in
-            recursive calls while pretty-printing.
+            indented this many levels. (The formatter decides what a
+            'level' means in terms of spaces or other characters
+            output.) Used internally in recursive calls while
+            pretty-printing.

         :param eventual_encoding: The bytestring will be in this encoding.

@@ -1819,7 +1843,7 @@ class Tag(PageElement):

     #Soup methods

-    def find(self, name=None, attrs={}, recursive=True, text=None,
+    def find(self, name=None, attrs={}, recursive=True, string=None,
              **kwargs):
         """Look in the children of this PageElement and find the first
         PageElement that matches the given criteria.

@@ -1838,13 +1862,13 @@ class Tag(PageElement):
         :rtype: bs4.element.Tag | bs4.element.NavigableString
         """
         r = None
-        l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
+        l = self.find_all(name, attrs, recursive, string, 1, **kwargs)
         if l:
             r = l[0]
         return r
     findChild = find  #BS2

-    def find_all(self, name=None, attrs={}, recursive=True, text=None,
+    def find_all(self, name=None, attrs={}, recursive=True, string=None,
                  limit=None, **kwargs):
         """Look in the children of this PageElement and find all
         PageElements that match the given criteria.

@@ -1865,7 +1889,7 @@ class Tag(PageElement):
         generator = self.descendants
         if not recursive:
             generator = self.children
-        return self._find_all(name, attrs, text, limit, generator, **kwargs)
+        return self._find_all(name, attrs, string, limit, generator, **kwargs)
     findAll = find_all       # BS3
     findChildren = find_all  # BS2

@@ -1967,8 +1991,10 @@ class Tag(PageElement):

         has_key() is gone in Python 3, anyway.
         """
-        warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
-            key))
+        warnings.warn(
+            'has_key is deprecated. Use has_attr(key) instead.',
+            DeprecationWarning
+        )
         return self.has_attr(key)

 # Next, a couple classes to represent queries and their results.

@@ -1982,7 +2008,7 @@ class SoupStrainer(object):
     document.
     """

-    def __init__(self, name=None, attrs={}, text=None, **kwargs):
+    def __init__(self, name=None, attrs={}, string=None, **kwargs):
         """Constructor.

         The SoupStrainer constructor takes the same arguments passed

@@ -1991,9 +2017,16 @@ class SoupStrainer(object):

         :param name: A filter on tag name.
         :param attrs: A dictionary of filters on attribute values.
-        :param text: A filter for a NavigableString with specific text.
+        :param string: A filter for a NavigableString with specific text.
         :kwargs: A dictionary of filters on attribute values.
         """
+        if string is None and 'text' in kwargs:
+            string = kwargs.pop('text')
+            warnings.warn(
+                "The 'text' argument to the SoupStrainer constructor is deprecated. Use 'string' instead.",
+                DeprecationWarning
+            )
+
         self.name = self._normalize_search_value(name)
         if not isinstance(attrs, dict):
             # Treat a non-dict value for attrs as a search for the 'class'

@@ -2018,7 +2051,10 @@ class SoupStrainer(object):
             normalized_attrs[key] = self._normalize_search_value(value)

         self.attrs = normalized_attrs
-        self.text = self._normalize_search_value(text)
+        self.string = self._normalize_search_value(string)
+
+        # DEPRECATED but just in case someone is checking this.
+        self.text = self.string

     def _normalize_search_value(self, value):
         # Leave it alone if it's a Unicode string, a callable, a

@@ -2052,8 +2088,8 @@ class SoupStrainer(object):

     def __str__(self):
         """A human-readable representation of this SoupStrainer."""
-        if self.text:
-            return self.text
+        if self.string:
+            return self.string
         else:
             return "%s|%s" % (self.name, self.attrs)

@@ -2113,7 +2149,7 @@ class SoupStrainer(object):
             found = markup
         else:
             found = markup_name
-        if found and self.text and not self._matches(found.string, self.text):
+        if found and self.string and not self._matches(found.string, self.string):
             found = None
         return found

@@ -2141,12 +2177,12 @@ class SoupStrainer(object):
         # If it's a Tag, make sure its name or attributes match.
         # Don't bother with Tags if we're searching for text.
         elif isinstance(markup, Tag):
-            if not self.text or self.name or self.attrs:
+            if not self.string or self.name or self.attrs:
                 found = self.search_tag(markup)
         # If it's text, make sure the text matches.
         elif isinstance(markup, NavigableString) or \
                  isinstance(markup, str):
-            if not self.name and not self.attrs and self._matches(markup, self.text):
+            if not self.name and not self.attrs and self._matches(markup, self.string):
                 found = markup
         else:
             raise Exception(

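SoupStrainer mirrors the same rename: its filter is now stored as .string,
with .text kept as a deprecated alias. A minimal sketch:

    from bs4 import BeautifulSoup, SoupStrainer

    only_links = SoupStrainer("a")
    soup = BeautifulSoup(
        '<p>no</p><a href="/x">yes</a>', "html.parser", parse_only=only_links
    )
    assert [a.get_text() for a in soup.find_all("a")] == ["yes"]

    strainer = SoupStrainer(string="yes")
    assert strainer.string == strainer.text  # alias preserved for old code
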
@@ -49,7 +49,7 @@ class Formatter(EntitySubstitution):
     def __init__(
             self, language=None, entity_substitution=None,
             void_element_close_prefix='/', cdata_containing_tags=None,
-            empty_attributes_are_booleans=False,
+            empty_attributes_are_booleans=False, indent=1,
     ):
         """Constructor.

@@ -69,6 +69,15 @@ class Formatter(EntitySubstitution):
         :param blank_attributes_are_booleans: Render attributes whose value
             is the empty string as HTML-style boolean attributes.
             (Attributes whose value is None are always rendered this way.)
+
+        :param indent: If indent is a non-negative integer or string,
+            then the contents of elements will be indented
+            appropriately when pretty-printing. An indent level of 0,
+            negative, or "" will only insert newlines. Using a
+            positive integer indent indents that many spaces per
+            level. If indent is a string (such as "\t"), that string
+            is used to indent each level. The default behavior is to
+            indent one space per level.
         """
         self.language = language
         self.entity_substitution = entity_substitution

@@ -77,6 +86,17 @@ class Formatter(EntitySubstitution):
             language, cdata_containing_tags, 'cdata_containing_tags'
         )
         self.empty_attributes_are_booleans=empty_attributes_are_booleans
+        if indent is None:
+            indent = 0
+        if isinstance(indent, int):
+            if indent < 0:
+                indent = 0
+            indent = ' ' * indent
+        elif isinstance(indent, str):
+            indent = indent
+        else:
+            indent = ' '
+        self.indent = indent

     def substitute(self, ns):
         """Process a string that needs to undergo entity substitution.

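A quick sketch of the new knob, assuming bs4 4.11's HTMLFormatter passes its
keyword arguments through to this constructor:

    from bs4 import BeautifulSoup
    from bs4.formatter import HTMLFormatter

    soup = BeautifulSoup("<div><p>Hi</p></div>", "html.parser")
    print(soup.prettify(formatter=HTMLFormatter(indent=4)))     # four spaces
    print(soup.prettify(formatter=HTMLFormatter(indent="\t")))  # tabs
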
@@ -7,9 +7,8 @@ __license__ = "MIT"
 import pickle
 import copy
 import functools
-import unittest
 import warnings
-from unittest import TestCase
+import pytest
 from bs4 import BeautifulSoup
 from bs4.element import (
     CharsetMetaAttributeValue,

@@ -23,7 +22,11 @@ from bs4.element import (
     Tag
 )

-from bs4.builder import HTMLParserTreeBuilder
+from bs4.builder import (
+    DetectsXMLParsedAsHTML,
+    HTMLParserTreeBuilder,
+    XMLParsedAsHTMLWarning,
+)
 default_builder = HTMLParserTreeBuilder

 BAD_DOCUMENT = """A bare string

@@ -63,7 +66,7 @@ BAD_DOCUMENT = """A bare string
 """


-class SoupTest(unittest.TestCase):
+class SoupTest(object):

     @property
     def default_builder(self):

@@ -80,15 +83,18 @@
         The details depend on the builder.
         """
         return self.default_builder(**kwargs).test_fragment_to_document(markup)

-    def assertSoupEquals(self, to_parse, compare_parsed_to=None):
+    def assert_soup(self, to_parse, compare_parsed_to=None):
+        """Parse some markup using Beautiful Soup and verify that
+        the output markup is as expected.
+        """
         builder = self.default_builder
         obj = BeautifulSoup(to_parse, builder=builder)
         if compare_parsed_to is None:
             compare_parsed_to = to_parse

         # Verify that the documents come out the same.
-        self.assertEqual(obj.decode(), self.document_for(compare_parsed_to))
+        assert obj.decode() == self.document_for(compare_parsed_to)

         # Also run some checks on the BeautifulSoup object itself:

@@ -99,9 +105,9 @@

         # The only tag in the tag stack is the one for the root
         # document.
-        self.assertEqual(
-            [obj.ROOT_TAG_NAME], [x.name for x in obj.tagStack]
-        )
+        assert [obj.ROOT_TAG_NAME] == [x.name for x in obj.tagStack]

+    assertSoupEquals = assert_soup
+
     def assertConnectedness(self, element):
         """Ensure that next_element and previous_element are properly

@@ -110,8 +116,8 @@
         earlier = None
         for e in element.descendants:
             if earlier:
-                self.assertEqual(e, earlier.next_element)
-                self.assertEqual(earlier, e.previous_element)
+                assert e == earlier.next_element
+                assert earlier == e.previous_element
             earlier = e

     def linkage_validator(self, el, _recursive_call=False):

@@ -228,10 +234,47 @@
             # Return the child to the recursive caller
             return child

+    def assert_selects(self, tags, should_match):
+        """Make sure that the given tags have the correct text.
+
+        This is used in tests that define a bunch of tags, each
+        containing a single string, and then select certain strings by
+        some mechanism.
+        """
+        assert [tag.string for tag in tags] == should_match
+
+    def assert_selects_ids(self, tags, should_match):
+        """Make sure that the given tags have the correct IDs.
+
+        This is used in tests that define a bunch of tags, each
+        containing a single string, and then select certain strings by
+        some mechanism.
+        """
+        assert [tag['id'] for tag in tags] == should_match
+

 class TreeBuilderSmokeTest(object):
+    # Tests that are common to HTML and XML tree builders.

+    @pytest.mark.parametrize(
+        "multi_valued_attributes",
+        [None, dict(b=['class']), {'*': ['notclass']}]
+    )
+    def test_attribute_not_multi_valued(self, multi_valued_attributes):
+        markup = '<a class="a b c">'
+        soup = self.soup(markup, multi_valued_attributes=multi_valued_attributes)
+        assert soup.a['class'] == 'a b c'
+
+    @pytest.mark.parametrize(
+        "multi_valued_attributes", [dict(a=['class']), {'*': ['class']}]
+    )
+    def test_attribute_multi_valued(self, multi_valued_attributes):
+        markup = '<a class="a b c">'
+        soup = self.soup(
+            markup, multi_valued_attributes=multi_valued_attributes
+        )
+        assert soup.a['class'] == ['a', 'b', 'c']
+
+    def test_fuzzed_input(self):
+        # This test centralizes in one place the various fuzz tests
+        # for Beautiful Soup created by the oss-fuzz project.

@@ -283,7 +326,7 @@ class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
         ]:
             soup = self.soup("")
             new_tag = soup.new_tag(name)
-            self.assertEqual(True, new_tag.is_empty_element)
+            assert new_tag.is_empty_element == True

     def test_special_string_containers(self):
         soup = self.soup(

@@ -298,7 +341,7 @@ class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
         assert isinstance(soup.style.string, Stylesheet)
         # The contents of the style tag resemble an HTML comment, but
         # it's not treated as a comment.
-        self.assertEqual("<!--Some CSS-->", soup.style.string)
+        assert soup.style.string == "<!--Some CSS-->"
         assert isinstance(soup.style.string, Stylesheet)

     def test_pickle_and_unpickle_identity(self):

@@ -307,8 +350,8 @@ class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
         tree = self.soup("<a><b>foo</a>")
         dumped = pickle.dumps(tree, 2)
         loaded = pickle.loads(dumped)
-        self.assertEqual(loaded.__class__, BeautifulSoup)
-        self.assertEqual(loaded.decode(), tree.decode())
+        assert loaded.__class__ == BeautifulSoup
+        assert loaded.decode() == tree.decode()

     def assertDoctypeHandled(self, doctype_fragment):
         """Assert that a given doctype string is handled correctly."""

@ -316,16 +359,13 @@ class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
|
|||
|
||||
# Make sure a Doctype object was created.
|
||||
doctype = soup.contents[0]
|
||||
self.assertEqual(doctype.__class__, Doctype)
|
||||
self.assertEqual(doctype, doctype_fragment)
|
||||
self.assertEqual(
|
||||
soup.encode("utf8")[:len(doctype_str)],
|
||||
doctype_str
|
||||
)
|
||||
assert doctype.__class__ == Doctype
|
||||
assert doctype == doctype_fragment
|
||||
assert soup.encode("utf8")[:len(doctype_str)] == doctype_str
|
||||
|
||||
# Make sure that the doctype was correctly associated with the
|
||||
# parse tree and that the rest of the document parsed.
|
||||
self.assertEqual(soup.p.contents[0], 'foo')
|
||||
assert soup.p.contents[0] == 'foo'
|
||||
|
||||
def _document_with_doctype(self, doctype_fragment, doctype_string="DOCTYPE"):
|
||||
"""Generate and parse a document with the given doctype."""
|
||||
|
@@ -343,7 +383,7 @@ class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
    def test_empty_doctype(self):
        soup = self.soup("<!DOCTYPE>")
        doctype = soup.contents[0]
-        self.assertEqual("", doctype.strip())
+        assert "" == doctype.strip()

    def test_mixed_case_doctype(self):
        # A lowercase or mixed-case doctype becomes a Doctype.
@@ -355,16 +395,13 @@ class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
        # Make sure a Doctype object was created and that the DOCTYPE
        # is uppercase.
        doctype = soup.contents[0]
-        self.assertEqual(doctype.__class__, Doctype)
-        self.assertEqual(doctype, "html")
-        self.assertEqual(
-            soup.encode("utf8")[:len(doctype_str)],
-            b"<!DOCTYPE html>"
-        )
+        assert doctype.__class__ == Doctype
+        assert doctype == "html"
+        assert soup.encode("utf8")[:len(doctype_str)] == b"<!DOCTYPE html>"

        # Make sure that the doctype was correctly associated with the
        # parse tree and that the rest of the document parsed.
-        self.assertEqual(soup.p.contents[0], 'foo')
+        assert soup.p.contents[0] == 'foo'

    def test_public_doctype_with_url(self):
        doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'
@@ -389,18 +426,43 @@ class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
-        soup = self.soup(markup)
-        self.assertEqual(
-            soup.encode("utf-8").replace(b"\n", b""),
-            markup.replace(b"\n", b""))
+        with warnings.catch_warnings(record=True) as w:
+            soup = self.soup(markup)
+        assert soup.encode("utf-8").replace(b"\n", b"") == markup.replace(b"\n", b"")
+
+        # No warning was issued about parsing an XML document as HTML,
+        # because XHTML is both.
+        assert w == []

    def test_namespaced_html(self):
-        """When a namespaced XML document is parsed as HTML it should
-        be treated as HTML with weird tag names.
-        """
+        # When a namespaced XML document is parsed as HTML it should
+        # be treated as HTML with weird tag names.
        markup = b"""<ns1:foo>content</ns1:foo><ns1:foo/><ns2:foo/>"""
-        soup = self.soup(markup)
-        self.assertEqual(2, len(soup.find_all("ns1:foo")))
+        with warnings.catch_warnings(record=True) as w:
+            soup = self.soup(markup)
+
+        assert 2 == len(soup.find_all("ns1:foo"))
+
+        # n.b. no "you're parsing XML as HTML" warning was given
+        # because there was no XML declaration.
+        assert [] == w
+
+    def test_detect_xml_parsed_as_html(self):
+        # A warning is issued when parsing an XML document as HTML,
+        # but basic stuff should still work.
+        markup = b"""<?xml version="1.0" encoding="utf-8"?><tag>string</tag>"""
+        with warnings.catch_warnings(record=True) as w:
+            soup = self.soup(markup)
+            assert soup.tag.string == 'string'
+        [warning] = w
+        assert isinstance(warning.message, XMLParsedAsHTMLWarning)
+        assert str(warning.message) == XMLParsedAsHTMLWarning.MESSAGE
+
+        # NOTE: the warning is not issued if the document appears to
+        # be XHTML (tested with test_real_xhtml_document in the
+        # superclass) or if there is no XML declaration (tested with
+        # test_namespaced_html in the superclass).

    def test_processing_instruction(self):
        # We test both Unicode and bytestring to verify that
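[Illustrative sketch, not part of the commit: suppressing the new XMLParsedAsHTMLWarning when parsing XML with an HTML parser really is what you want; the 'html.parser' choice is an assumption.]

import warnings
from bs4 import BeautifulSoup, XMLParsedAsHTMLWarning

with warnings.catch_warnings():
    warnings.filterwarnings('ignore', category=XMLParsedAsHTMLWarning)
    soup = BeautifulSoup('<?xml version="1.0"?><tag>string</tag>',
                         'html.parser')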
@@ -409,11 +471,11 @@ class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
        # need to process anything.
        markup = """<?PITarget PIContent?>"""
        soup = self.soup(markup)
-        self.assertEqual(markup, soup.decode())
+        assert markup == soup.decode()

        markup = b"""<?PITarget PIContent?>"""
        soup = self.soup(markup)
-        self.assertEqual(markup, soup.encode("utf8"))
+        assert markup == soup.encode("utf8")

    def test_deepcopy(self):
        """Make sure you can copy the tree builder.
@@ -430,18 +492,18 @@ class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
        shouldn't be presented that way.
        """
        soup = self.soup("<p/>")
-        self.assertFalse(soup.p.is_empty_element)
-        self.assertEqual(str(soup.p), "<p></p>")
+        assert not soup.p.is_empty_element
+        assert str(soup.p) == "<p></p>"

    def test_unclosed_tags_get_closed(self):
        """A tag that's not closed by the end of the document should be closed.

        This applies to all tags except empty-element tags.
        """
-        self.assertSoupEquals("<p>", "<p></p>")
-        self.assertSoupEquals("<b>", "<b></b>")
+        self.assert_soup("<p>", "<p></p>")
+        self.assert_soup("<b>", "<b></b>")

-        self.assertSoupEquals("<br>", "<br/>")
+        self.assert_soup("<br>", "<br/>")

    def test_br_is_always_empty_element_tag(self):
        """A <br> tag is designated as an empty-element tag.
@@ -450,11 +512,11 @@ class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
        two tags, but it should always be an empty-element tag.
        """
        soup = self.soup("<br></br>")
-        self.assertTrue(soup.br.is_empty_element)
-        self.assertEqual(str(soup.br), "<br/>")
+        assert soup.br.is_empty_element
+        assert str(soup.br) == "<br/>"

    def test_nested_formatting_elements(self):
-        self.assertSoupEquals("<em><em></em></em>")
+        self.assert_soup("<em><em></em></em>")

    def test_double_head(self):
        html = '''<!DOCTYPE html>
@@ -471,22 +533,22 @@ Hello, world!
</html>
'''
        soup = self.soup(html)
-        self.assertEqual("text/javascript", soup.find('script')['type'])
+        assert "text/javascript" == soup.find('script')['type']

    def test_comment(self):
        # Comments are represented as Comment objects.
        markup = "<p>foo<!--foobar-->baz</p>"
-        self.assertSoupEquals(markup)
+        self.assert_soup(markup)

        soup = self.soup(markup)
-        comment = soup.find(text="foobar")
-        self.assertEqual(comment.__class__, Comment)
+        comment = soup.find(string="foobar")
+        assert comment.__class__ == Comment

        # The comment is properly integrated into the tree.
-        foo = soup.find(text="foo")
-        self.assertEqual(comment, foo.next_element)
-        baz = soup.find(text="baz")
-        self.assertEqual(comment, baz.previous_element)
+        foo = soup.find(string="foo")
+        assert comment == foo.next_element
+        baz = soup.find(string="baz")
+        assert comment == baz.previous_element

    def test_preserved_whitespace_in_pre_and_textarea(self):
        """Whitespace must be preserved in <pre> and <textarea> tags,
@@ -494,35 +556,35 @@ Hello, world!
        """
        pre_markup = "<pre> </pre>"
        textarea_markup = "<textarea> woo\nwoo </textarea>"
-        self.assertSoupEquals(pre_markup)
-        self.assertSoupEquals(textarea_markup)
+        self.assert_soup(pre_markup)
+        self.assert_soup(textarea_markup)

        soup = self.soup(pre_markup)
-        self.assertEqual(soup.pre.prettify(), pre_markup)
+        assert soup.pre.prettify() == pre_markup

        soup = self.soup(textarea_markup)
-        self.assertEqual(soup.textarea.prettify(), textarea_markup)
+        assert soup.textarea.prettify() == textarea_markup

        soup = self.soup("<textarea></textarea>")
-        self.assertEqual(soup.textarea.prettify(), "<textarea></textarea>")
+        assert soup.textarea.prettify() == "<textarea></textarea>"

    def test_nested_inline_elements(self):
        """Inline elements can be nested indefinitely."""
        b_tag = "<b>Inside a B tag</b>"
-        self.assertSoupEquals(b_tag)
+        self.assert_soup(b_tag)

        nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>"
-        self.assertSoupEquals(nested_b_tag)
+        self.assert_soup(nested_b_tag)

        double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>"
-        self.assertSoupEquals(nested_b_tag)
+        self.assert_soup(nested_b_tag)

    def test_nested_block_level_elements(self):
        """Block elements can be nested."""
        soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>')
        blockquote = soup.blockquote
-        self.assertEqual(blockquote.p.b.string, 'Foo')
-        self.assertEqual(blockquote.b.string, 'Foo')
+        assert blockquote.p.b.string == 'Foo'
+        assert blockquote.b.string == 'Foo'

    def test_correctly_nested_tables(self):
        """One table can go inside another one."""
@@ -533,13 +595,13 @@ Hello, world!
            '<tr><td>foo</td></tr>'
            '</table></td>')

-        self.assertSoupEquals(
+        self.assert_soup(
            markup,
            '<table id="1"><tr><td>Here\'s another table:'
            '<table id="2"><tr><td>foo</td></tr></table>'
            '</td></tr></table>')

-        self.assertSoupEquals(
+        self.assert_soup(
            "<table><thead><tr><td>Foo</td></tr></thead>"
            "<tbody><tr><td>Bar</td></tr></tbody>"
            "<tfoot><tr><td>Baz</td></tr></tfoot></table>")
@@ -550,11 +612,11 @@ Hello, world!

        markup = '<div class=" foo bar "></a>'
        soup = self.soup(markup)
-        self.assertEqual(['foo', 'bar'], soup.div['class'])
+        assert ['foo', 'bar'] == soup.div['class']

        # If you search by the literal name of the class it's like the whitespace
        # wasn't there.
-        self.assertEqual(soup.div, soup.find('div', class_="foo bar"))
+        assert soup.div == soup.find('div', class_="foo bar")

    def test_deeply_nested_multivalued_attribute(self):
        # html5lib can set the attributes of the same tag many times
@@ -562,7 +624,7 @@ Hello, world!
        # multivalued attributes.
        markup = '<table><div><div class="css"></div></div></table>'
        soup = self.soup(markup)
-        self.assertEqual(["css"], soup.div.div['class'])
+        assert ["css"] == soup.div.div['class']

    def test_multivalued_attribute_on_html(self):
        # html5lib uses a different API to set the attributes ot the
@@ -570,21 +632,21 @@ Hello, world!
        # attributes.
        markup = '<html class="a b"></html>'
        soup = self.soup(markup)
-        self.assertEqual(["a", "b"], soup.html['class'])
+        assert ["a", "b"] == soup.html['class']

    def test_angle_brackets_in_attribute_values_are_escaped(self):
-        self.assertSoupEquals('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>')
+        self.assert_soup('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>')

    def test_strings_resembling_character_entity_references(self):
        # "&T" and "&p" look like incomplete character entities, but they are
        # not.
-        self.assertSoupEquals(
+        self.assert_soup(
            "<p>&bull; AT&T is in the s&p 500</p>",
            "<p>\u2022 AT&amp;T is in the s&amp;p 500</p>"
        )

    def test_apos_entity(self):
-        self.assertSoupEquals(
+        self.assert_soup(
            "<p>Bob&apos;s Bar</p>",
            "<p>Bob's Bar</p>",
        )
@@ -599,45 +661,45 @@ Hello, world!
        # characters.
        markup = "<p>&#147;Hello&#148; -&#9731;</p>"
        soup = self.soup(markup)
-        self.assertEqual("“Hello” -☃", soup.p.string)
+        assert "“Hello” -☃" == soup.p.string

    def test_entities_in_attributes_converted_to_unicode(self):
        expect = '<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
-        self.assertSoupEquals('<p id="pi&#241;ata"></p>', expect)
-        self.assertSoupEquals('<p id="pi&#xf1;ata"></p>', expect)
-        self.assertSoupEquals('<p id="pi&#Xf1;ata"></p>', expect)
-        self.assertSoupEquals('<p id="pi&ntilde;ata"></p>', expect)
+        self.assert_soup('<p id="pi&#241;ata"></p>', expect)
+        self.assert_soup('<p id="pi&#xf1;ata"></p>', expect)
+        self.assert_soup('<p id="pi&#Xf1;ata"></p>', expect)
+        self.assert_soup('<p id="pi&ntilde;ata"></p>', expect)

    def test_entities_in_text_converted_to_unicode(self):
        expect = '<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
-        self.assertSoupEquals("<p>pi&#241;ata</p>", expect)
-        self.assertSoupEquals("<p>pi&#xf1;ata</p>", expect)
-        self.assertSoupEquals("<p>pi&#Xf1;ata</p>", expect)
-        self.assertSoupEquals("<p>pi&ntilde;ata</p>", expect)
+        self.assert_soup("<p>pi&#241;ata</p>", expect)
+        self.assert_soup("<p>pi&#xf1;ata</p>", expect)
+        self.assert_soup("<p>pi&#Xf1;ata</p>", expect)
+        self.assert_soup("<p>pi&ntilde;ata</p>", expect)

    def test_quot_entity_converted_to_quotation_mark(self):
-        self.assertSoupEquals("<p>I said &quot;good day!&quot;</p>",
+        self.assert_soup("<p>I said &quot;good day!&quot;</p>",
                          '<p>I said "good day!"</p>')

    def test_out_of_range_entity(self):
        expect = "\N{REPLACEMENT CHARACTER}"
-        self.assertSoupEquals("&#10000000000000;", expect)
-        self.assertSoupEquals("&#x10000000000000;", expect)
-        self.assertSoupEquals("&#1000000000;", expect)
+        self.assert_soup("&#10000000000000;", expect)
+        self.assert_soup("&#x10000000000000;", expect)
+        self.assert_soup("&#1000000000;", expect)

    def test_multipart_strings(self):
        "Mostly to prevent a recurrence of a bug in the html5lib treebuilder."
        soup = self.soup("<html><h2>\nfoo</h2><p></p></html>")
-        self.assertEqual("p", soup.h2.string.next_element.name)
-        self.assertEqual("p", soup.p.name)
+        assert "p" == soup.h2.string.next_element.name
+        assert "p" == soup.p.name
        self.assertConnectedness(soup)

    def test_empty_element_tags(self):
        """Verify consistent handling of empty-element tags,
        no matter how they come in through the markup.
        """
-        self.assertSoupEquals('<br/><br/><br/>', "<br/><br/><br/>")
-        self.assertSoupEquals('<br /><br /><br />', "<br/><br/><br/>")
+        self.assert_soup('<br/><br/><br/>', "<br/><br/><br/>")
+        self.assert_soup('<br /><br /><br />', "<br/><br/><br/>")

    def test_head_tag_between_head_and_body(self):
        "Prevent recurrence of a bug in the html5lib treebuilder."
@@ -647,7 +709,7 @@ Hello, world!
        </html>
        """
        soup = self.soup(content)
-        self.assertNotEqual(None, soup.html.body)
+        assert soup.html.body is not None
        self.assertConnectedness(soup)

    def test_multiple_copies_of_a_tag(self):
@@ -674,18 +736,16 @@ Hello, world!

        markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>'
        soup = self.soup(markup)
-        self.assertEqual(markup, soup.encode())
+        assert markup == soup.encode()
        html = soup.html
-        self.assertEqual('http://www.w3.org/1999/xhtml', soup.html['xmlns'])
-        self.assertEqual(
-            'http://www.w3.org/1998/Math/MathML', soup.html['xmlns:mathml'])
-        self.assertEqual(
-            'http://www.w3.org/2000/svg', soup.html['xmlns:svg'])
+        assert 'http://www.w3.org/1999/xhtml' == soup.html['xmlns']
+        assert 'http://www.w3.org/1998/Math/MathML' == soup.html['xmlns:mathml']
+        assert 'http://www.w3.org/2000/svg' == soup.html['xmlns:svg']

    def test_multivalued_attribute_value_becomes_list(self):
        markup = b'<a class="foo bar">'
        soup = self.soup(markup)
-        self.assertEqual(['foo', 'bar'], soup.a['class'])
+        assert ['foo', 'bar'] == soup.a['class']

    #
    # Generally speaking, tests below this point are more tests of
@@ -700,67 +760,65 @@ Hello, world!
        # encoding found in the declaration! The horror!
        markup = '<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>'
        soup = self.soup(markup)
-        self.assertEqual('Sacr\xe9 bleu!', soup.body.string)
+        assert 'Sacr\xe9 bleu!' == soup.body.string

    def test_soupstrainer(self):
        """Parsers should be able to work with SoupStrainers."""
        strainer = SoupStrainer("b")
        soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>",
                         parse_only=strainer)
-        self.assertEqual(soup.decode(), "<b>bold</b>")
+        assert soup.decode() == "<b>bold</b>"
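[Illustrative sketch, not part of the commit: the SoupStrainer/parse_only combination used by test_soupstrainer above; 'html.parser' is an assumed parser choice.]

from bs4 import BeautifulSoup, SoupStrainer

# parse_only keeps only the parts of the document matching the strainer.
only_b = SoupStrainer("b")
soup = BeautifulSoup("A <b>bold</b> <i>statement</i>", "html.parser",
                     parse_only=only_b)
assert soup.decode() == "<b>bold</b>"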
    def test_single_quote_attribute_values_become_double_quotes(self):
-        self.assertSoupEquals("<foo attr='bar'></foo>",
+        self.assert_soup("<foo attr='bar'></foo>",
                          '<foo attr="bar"></foo>')

    def test_attribute_values_with_nested_quotes_are_left_alone(self):
        text = """<foo attr='bar "brawls" happen'>a</foo>"""
-        self.assertSoupEquals(text)
+        self.assert_soup(text)

    def test_attribute_values_with_double_nested_quotes_get_quoted(self):
        text = """<foo attr='bar "brawls" happen'>a</foo>"""
        soup = self.soup(text)
        soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"'
-        self.assertSoupEquals(
+        self.assert_soup(
            soup.foo.decode(),
            """<foo attr="Brawls happen at &quot;Bob\'s Bar&quot;">a</foo>""")

    def test_ampersand_in_attribute_value_gets_escaped(self):
-        self.assertSoupEquals('<this is="really messed up & stuff"></this>',
+        self.assert_soup('<this is="really messed up & stuff"></this>',
                          '<this is="really messed up &amp; stuff"></this>')

-        self.assertSoupEquals(
+        self.assert_soup(
            '<a href="http://example.org?a=1&b=2;3">foo</a>',
            '<a href="http://example.org?a=1&amp;b=2;3">foo</a>')

    def test_escaped_ampersand_in_attribute_value_is_left_alone(self):
-        self.assertSoupEquals('<a href="http://example.org?a=1&amp;b=2;3"></a>')
+        self.assert_soup('<a href="http://example.org?a=1&amp;b=2;3"></a>')

    def test_entities_in_strings_converted_during_parsing(self):
        # Both XML and HTML entities are converted to Unicode characters
        # during parsing.
        text = "<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
        expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>"
-        self.assertSoupEquals(text, expected)
+        self.assert_soup(text, expected)

    def test_smart_quotes_converted_on_the_way_in(self):
        # Microsoft smart quotes are converted to Unicode characters during
        # parsing.
        quote = b"<p>\x91Foo\x92</p>"
        soup = self.soup(quote)
-        self.assertEqual(
-            soup.p.string,
-            "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}")
+        assert soup.p.string == "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}"

    def test_non_breaking_spaces_converted_on_the_way_in(self):
        soup = self.soup("<a>&nbsp;&nbsp;</a>")
-        self.assertEqual(soup.a.string, "\N{NO-BREAK SPACE}" * 2)
+        assert soup.a.string == "\N{NO-BREAK SPACE}" * 2

    def test_entities_converted_on_the_way_out(self):
        text = "<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
        expected = "<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>".encode("utf-8")
        soup = self.soup(text)
-        self.assertEqual(soup.p.encode("utf-8"), expected)
+        assert soup.p.encode("utf-8") == expected

    def test_real_iso_latin_document(self):
        # Smoke test of interrelated functionality, using an
@@ -787,7 +845,7 @@ Hello, world!
        expected = expected.encode("utf-8")

        # Ta-da!
-        self.assertEqual(result, expected)
+        assert result == expected

    def test_real_shift_jis_document(self):
        # Smoke test to make sure the parser can handle a document in
@@ -803,8 +861,8 @@ Hello, world!

        # Make sure the parse tree is correctly encoded to various
        # encodings.
-        self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8"))
-        self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp"))
+        assert soup.encode("utf-8") == unicode_html.encode("utf-8")
+        assert soup.encode("euc_jp") == unicode_html.encode("euc_jp")

    def test_real_hebrew_document(self):
        # A real-world test to make sure we can convert ISO-8859-9 (a
@@ -815,9 +873,9 @@ Hello, world!
        # Some tree builders call it iso8859-8, others call it iso-8859-8.
        # That's not a difference we really care about.
        assert soup.original_encoding in ('iso8859-8', 'iso-8859-8')
-        self.assertEqual(
-            soup.encode('utf-8'),
-            hebrew_document.decode("iso8859-8").encode("utf-8"))
+        assert soup.encode('utf-8') == (
+            hebrew_document.decode("iso8859-8").encode("utf-8")
+        )

    def test_meta_tag_reflects_current_encoding(self):
        # Here's the <meta> tag saying that a document is
@@ -835,14 +893,14 @@ Hello, world!
        # Parse the document, and the charset is seemingly unaffected.
        parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
        content = parsed_meta['content']
-        self.assertEqual('text/html; charset=x-sjis', content)
+        assert 'text/html; charset=x-sjis' == content

        # But that value is actually a ContentMetaAttributeValue object.
-        self.assertTrue(isinstance(content, ContentMetaAttributeValue))
+        assert isinstance(content, ContentMetaAttributeValue)

        # And it will take on a value that reflects its current
        # encoding.
-        self.assertEqual('text/html; charset=utf8', content.encode("utf8"))
+        assert 'text/html; charset=utf8' == content.encode("utf8")

        # For the rest of the story, see TestSubstitutions in
        # test_tree.py.
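[Illustrative sketch, not part of the commit: the ContentMetaAttributeValue behavior tested above, shown end to end; 'html.parser' is an assumed parser choice.]

from bs4 import BeautifulSoup

markup = '<html><head><meta content="text/html; charset=x-sjis" http-equiv="Content-type"/></head></html>'
soup = BeautifulSoup(markup, 'html.parser')
# In the tree, the attribute still claims x-sjis...
assert soup.meta['content'] == 'text/html; charset=x-sjis'
# ...but serializing to another encoding rewrites the declared charset.
assert b'charset=utf8' in soup.encode('utf8')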
@@ -862,14 +920,14 @@ Hello, world!
        # Parse the document, and the charset is seemingly unaffected.
        parsed_meta = soup.find('meta', id="encoding")
        charset = parsed_meta['charset']
-        self.assertEqual('x-sjis', charset)
+        assert 'x-sjis' == charset

        # But that value is actually a CharsetMetaAttributeValue object.
-        self.assertTrue(isinstance(charset, CharsetMetaAttributeValue))
+        assert isinstance(charset, CharsetMetaAttributeValue)

        # And it will take on a value that reflects its current
        # encoding.
-        self.assertEqual('utf8', charset.encode("utf8"))
+        assert 'utf8' == charset.encode("utf8")

    def test_python_specific_encodings_not_used_in_charset(self):
        # You can encode an HTML document using a Python-specific
@@ -897,7 +955,7 @@ Hello, world!
    def test_tag_with_no_attributes_can_have_attributes_added(self):
        data = self.soup("<a>text</a>")
        data.a['foo'] = 'bar'
-        self.assertEqual('<a foo="bar">text</a>', data.a.decode())
+        assert '<a foo="bar">text</a>' == data.a.decode()

    def test_closing_tag_with_no_opening_tag(self):
        # Without BeautifulSoup.open_tag_counter, the </span> tag will
@@ -905,9 +963,7 @@ Hello, world!
        # for a <span> tag that wasn't there. The result is that 'text2'
        # will show up outside the body of the document.
        soup = self.soup("<body><div><p>text1</p></span>text2</div></body>")
-        self.assertEqual(
-            "<body><div><p>text1</p>text2</div></body>", soup.body.decode()
-        )
+        assert "<body><div><p>text1</p>text2</div></body>" == soup.body.decode()

    def test_worst_case(self):
        """Test the worst case (currently) for linking issues."""
@@ -924,18 +980,17 @@ class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
        tree = self.soup("<a><b>foo</a>")
        dumped = pickle.dumps(tree, 2)
        loaded = pickle.loads(dumped)
-        self.assertEqual(loaded.__class__, BeautifulSoup)
-        self.assertEqual(loaded.decode(), tree.decode())
+        assert loaded.__class__ == BeautifulSoup
+        assert loaded.decode() == tree.decode()

    def test_docstring_generated(self):
        soup = self.soup("<root/>")
-        self.assertEqual(
-            soup.encode(), b'<?xml version="1.0" encoding="utf-8"?>\n<root/>')
+        assert soup.encode() == b'<?xml version="1.0" encoding="utf-8"?>\n<root/>'

    def test_xml_declaration(self):
        markup = b"""<?xml version="1.0" encoding="utf8"?>\n<foo/>"""
        soup = self.soup(markup)
-        self.assertEqual(markup, soup.encode("utf8"))
+        assert markup == soup.encode("utf8")

    def test_python_specific_encodings_not_used_in_xml_declaration(self):
        # You can encode an XML document using a Python-specific
@@ -959,7 +1014,7 @@ class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
    def test_processing_instruction(self):
        markup = b"""<?xml version="1.0" encoding="utf8"?>\n<?PITarget PIContent?>"""
        soup = self.soup(markup)
-        self.assertEqual(markup, soup.encode("utf8"))
+        assert markup == soup.encode("utf8")

    def test_real_xhtml_document(self):
        """A real XHTML document should come out *exactly* the same as it went in."""
@@ -970,8 +1025,7 @@ class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
        <body>Goodbye.</body>
        </html>"""
        soup = self.soup(markup)
-        self.assertEqual(
-            soup.encode("utf-8"), markup)
+        assert soup.encode("utf-8") == markup

    def test_nested_namespaces(self):
        doc = b"""<?xml version="1.0" encoding="utf-8"?>
@@ -982,7 +1036,7 @@ class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
</child>
</parent>"""
        soup = self.soup(doc)
-        self.assertEqual(doc, soup.encode())
+        assert doc == soup.encode()

    def test_formatter_processes_script_tag_for_xml_documents(self):
        doc = """
@@ -994,24 +1048,26 @@ class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
        # it later.
        soup.script.string = 'console.log("< < hey > > ");'
        encoded = soup.encode()
-        self.assertTrue(b"&lt; &lt; hey &gt; &gt;" in encoded)
+        assert b"&lt; &lt; hey &gt; &gt;" in encoded

    def test_can_parse_unicode_document(self):
        markup = '<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
        soup = self.soup(markup)
-        self.assertEqual('Sacr\xe9 bleu!', soup.root.string)
+        assert 'Sacr\xe9 bleu!' == soup.root.string

+    def test_can_parse_unicode_document_begining_with_bom(self):
+        markup = '\N{BYTE ORDER MARK}<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
+        soup = self.soup(markup)
+        assert 'Sacr\xe9 bleu!' == soup.root.string

    def test_popping_namespaced_tag(self):
        markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
        soup = self.soup(markup)
-        self.assertEqual(
-            str(soup.rss), markup)
+        assert str(soup.rss) == markup

    def test_docstring_includes_correct_encoding(self):
        soup = self.soup("<root/>")
-        self.assertEqual(
-            soup.encode("latin1"),
-            b'<?xml version="1.0" encoding="latin1"?>\n<root/>')
+        assert soup.encode("latin1") == b'<?xml version="1.0" encoding="latin1"?>\n<root/>'

    def test_large_xml_document(self):
        """A large XML document should come out the same as it went in."""
@@ -1019,34 +1075,33 @@ class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
                  + b'0' * (2**12)
                  + b'</root>')
        soup = self.soup(markup)
-        self.assertEqual(soup.encode("utf-8"), markup)
+        assert soup.encode("utf-8") == markup

    def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
-        self.assertSoupEquals("<p>", "<p/>")
-        self.assertSoupEquals("<p>foo</p>")
+        self.assert_soup("<p>", "<p/>")
+        self.assert_soup("<p>foo</p>")

    def test_namespaces_are_preserved(self):
        markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
        soup = self.soup(markup)
        root = soup.root
-        self.assertEqual("http://example.com/", root['xmlns:a'])
-        self.assertEqual("http://example.net/", root['xmlns:b'])
+        assert "http://example.com/" == root['xmlns:a']
+        assert "http://example.net/" == root['xmlns:b']

    def test_closing_namespaced_tag(self):
        markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
        soup = self.soup(markup)
-        self.assertEqual(str(soup.p), markup)
+        assert str(soup.p) == markup

    def test_namespaced_attributes(self):
        markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
        soup = self.soup(markup)
-        self.assertEqual(str(soup.foo), markup)
+        assert str(soup.foo) == markup

    def test_namespaced_attributes_xml_namespace(self):
        markup = '<foo xml:lang="fr">bar</foo>'
        soup = self.soup(markup)
-        self.assertEqual(str(soup.foo), markup)
+        assert str(soup.foo) == markup

    def test_find_by_prefixed_name(self):
        doc = """<?xml version="1.0" encoding="utf-8"?>
@@ -1061,14 +1116,14 @@ class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
        soup = self.soup(doc)

        # There are three <tag> tags.
-        self.assertEqual(3, len(soup.find_all('tag')))
+        assert 3 == len(soup.find_all('tag'))

        # But two of them are ns1:tag and one of them is ns2:tag.
-        self.assertEqual(2, len(soup.find_all('ns1:tag')))
-        self.assertEqual(1, len(soup.find_all('ns2:tag')))
+        assert 2 == len(soup.find_all('ns1:tag'))
+        assert 1 == len(soup.find_all('ns2:tag'))

-        self.assertEqual(1, len(soup.find_all('ns2:tag', key='value')))
-        self.assertEqual(3, len(soup.find_all(['ns1:tag', 'ns2:tag'])))
+        assert 1 == len(soup.find_all('ns2:tag', key='value'))
+        assert 3 == len(soup.find_all(['ns1:tag', 'ns2:tag']))

    def test_copy_tag_preserves_namespace(self):
        xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
@@ -1079,7 +1134,7 @@ class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
        duplicate = copy.copy(tag)

        # The two tags have the same namespace prefix.
-        self.assertEqual(tag.prefix, duplicate.prefix)
+        assert tag.prefix == duplicate.prefix

    def test_worst_case(self):
        """Test the worst case (currently) for linking issues."""
@@ -1099,29 +1154,29 @@ class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
    def test_html_tags_have_namespace(self):
        markup = "<a>"
        soup = self.soup(markup)
-        self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace)
+        assert "http://www.w3.org/1999/xhtml" == soup.a.namespace

    def test_svg_tags_have_namespace(self):
        markup = '<svg><circle/></svg>'
        soup = self.soup(markup)
        namespace = "http://www.w3.org/2000/svg"
-        self.assertEqual(namespace, soup.svg.namespace)
-        self.assertEqual(namespace, soup.circle.namespace)
+        assert namespace == soup.svg.namespace
+        assert namespace == soup.circle.namespace

    def test_mathml_tags_have_namespace(self):
        markup = '<math><msqrt>5</msqrt></math>'
        soup = self.soup(markup)
        namespace = 'http://www.w3.org/1998/Math/MathML'
-        self.assertEqual(namespace, soup.math.namespace)
-        self.assertEqual(namespace, soup.msqrt.namespace)
+        assert namespace == soup.math.namespace
+        assert namespace == soup.msqrt.namespace

    def test_xml_declaration_becomes_comment(self):
        markup = '<?xml version="1.0" encoding="utf-8"?><html></html>'
        soup = self.soup(markup)
-        self.assertTrue(isinstance(soup.contents[0], Comment))
-        self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?')
-        self.assertEqual("html", soup.contents[0].next_element.name)
+        assert isinstance(soup.contents[0], Comment)
+        assert soup.contents[0] == '?xml version="1.0" encoding="utf-8"?'
+        assert "html" == soup.contents[0].next_element.name

def skipIf(condition, reason):
    def nothing(test, *args, **kwargs):
lib/bs4/tests/test_builder.py (new file, 29 lines)
@@ -0,0 +1,29 @@
import pytest
from unittest.mock import patch
from bs4.builder import DetectsXMLParsedAsHTML


class TestDetectsXMLParsedAsHTML(object):

    @pytest.mark.parametrize(
        "markup,looks_like_xml",
        [("No xml declaration", False),
         ("<html>obviously HTML</html", False),
         ("<?xml ><html>Actually XHTML</html>", False),
         ("<?xml> < html>Tricky XHTML</html>", False),
         ("<?xml ><no-html-tag>", True),
        ]
    )
    def test_warn_if_markup_looks_like_xml(self, markup, looks_like_xml):
        # Test of our ability to guess at whether markup looks XML-ish
        # _and_ not HTML-ish.
        with patch('bs4.builder.DetectsXMLParsedAsHTML._warn') as mock:
            for data in markup, markup.encode('utf8'):
                result = DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
                    data
                )
                assert result == looks_like_xml
                if looks_like_xml:
                    assert mock.called
                else:
                    assert not mock.called
                mock.reset_mock()
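[Illustrative sketch, not part of the commit: calling the class method under test directly, using one of the XML-ish inputs from the parametrized test above.]

import warnings
from bs4.builder import DetectsXMLParsedAsHTML

with warnings.catch_warnings(record=True) as w:
    warnings.simplefilter("always")
    looked_like_xml = DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml(
        b"<?xml ><no-html-tag>")
assert looked_like_xml    # markup looks like XML, not HTML
assert len(w) == 1        # so a warning was issued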
lib/bs4/tests/test_builder_registry.py (new file, 136 lines)
@@ -0,0 +1,136 @@
"""Tests of the builder registry."""

import pytest
import warnings

from bs4 import BeautifulSoup
from bs4.builder import (
    builder_registry as registry,
    HTMLParserTreeBuilder,
    TreeBuilderRegistry,
)

try:
    from bs4.builder import HTML5TreeBuilder
    HTML5LIB_PRESENT = True
except ImportError:
    HTML5LIB_PRESENT = False

try:
    from bs4.builder import (
        LXMLTreeBuilderForXML,
        LXMLTreeBuilder,
    )
    LXML_PRESENT = True
except ImportError:
    LXML_PRESENT = False


class TestBuiltInRegistry(object):
    """Test the built-in registry with the default builders registered."""

    def test_combination(self):
        assert registry.lookup('strict', 'html') == HTMLParserTreeBuilder
        if LXML_PRESENT:
            assert registry.lookup('fast', 'html') == LXMLTreeBuilder
            assert registry.lookup('permissive', 'xml') == LXMLTreeBuilderForXML
        if HTML5LIB_PRESENT:
            assert registry.lookup('html5lib', 'html') == HTML5TreeBuilder

    def test_lookup_by_markup_type(self):
        if LXML_PRESENT:
            assert registry.lookup('html') == LXMLTreeBuilder
            assert registry.lookup('xml') == LXMLTreeBuilderForXML
        else:
            assert registry.lookup('xml') == None
            if HTML5LIB_PRESENT:
                assert registry.lookup('html') == HTML5TreeBuilder
            else:
                assert registry.lookup('html') == HTMLParserTreeBuilder

    def test_named_library(self):
        if LXML_PRESENT:
            assert registry.lookup('lxml', 'xml') == LXMLTreeBuilderForXML
            assert registry.lookup('lxml', 'html') == LXMLTreeBuilder
        if HTML5LIB_PRESENT:
            assert registry.lookup('html5lib') == HTML5TreeBuilder

        assert registry.lookup('html.parser') == HTMLParserTreeBuilder

    def test_beautifulsoup_constructor_does_lookup(self):

        with warnings.catch_warnings(record=True) as w:
            # This will create a warning about not explicitly
            # specifying a parser, but we'll ignore it.

            # You can pass in a string.
            BeautifulSoup("", features="html")
            # Or a list of strings.
            BeautifulSoup("", features=["html", "fast"])
            pass

        # You'll get an exception if BS can't find an appropriate
        # builder.
        with pytest.raises(ValueError):
            BeautifulSoup("", features="no-such-feature")

class TestRegistry(object):
    """Test the TreeBuilderRegistry class in general."""

    def setup_method(self):
        self.registry = TreeBuilderRegistry()

    def builder_for_features(self, *feature_list):
        cls = type('Builder_' + '_'.join(feature_list),
                   (object,), {'features' : feature_list})

        self.registry.register(cls)
        return cls

    def test_register_with_no_features(self):
        builder = self.builder_for_features()

        # Since the builder advertises no features, you can't find it
        # by looking up features.
        assert self.registry.lookup('foo') is None

        # But you can find it by doing a lookup with no features, if
        # this happens to be the only registered builder.
        assert self.registry.lookup() == builder

    def test_register_with_features_makes_lookup_succeed(self):
        builder = self.builder_for_features('foo', 'bar')
        assert self.registry.lookup('foo') is builder
        assert self.registry.lookup('bar') is builder

    def test_lookup_fails_when_no_builder_implements_feature(self):
        builder = self.builder_for_features('foo', 'bar')
        assert self.registry.lookup('baz') is None

    def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
        builder1 = self.builder_for_features('foo')
        builder2 = self.builder_for_features('bar')
        assert self.registry.lookup() == builder2

    def test_lookup_fails_when_no_tree_builders_registered(self):
        assert self.registry.lookup() is None

    def test_lookup_gets_most_recent_builder_supporting_all_features(self):
        has_one = self.builder_for_features('foo')
        has_the_other = self.builder_for_features('bar')
        has_both_early = self.builder_for_features('foo', 'bar', 'baz')
        has_both_late = self.builder_for_features('foo', 'bar', 'quux')
        lacks_one = self.builder_for_features('bar')
        has_the_other = self.builder_for_features('foo')

        # There are two builders featuring 'foo' and 'bar', but
        # the one that also features 'quux' was registered later.
        assert self.registry.lookup('foo', 'bar') == has_both_late

        # There is only one builder featuring 'foo', 'bar', and 'baz'.
        assert self.registry.lookup('foo', 'bar', 'baz') == has_both_early

    def test_lookup_fails_when_cannot_reconcile_requested_features(self):
        builder1 = self.builder_for_features('foo', 'bar')
        builder2 = self.builder_for_features('foo', 'baz')
        assert self.registry.lookup('bar', 'baz') is None
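[Illustrative sketch, not part of the commit: registering a builder with a fresh TreeBuilderRegistry, mirroring the builder_for_features() helper above. FakeBuilder and its feature names are hypothetical.]

from bs4.builder import TreeBuilderRegistry

registry = TreeBuilderRegistry()

# A stand-in builder class advertising two features.
FakeBuilder = type('FakeBuilder', (object,), {'features': ('fast', 'html')})
registry.register(FakeBuilder)

# Lookup by any advertised feature finds the most recent registration.
assert registry.lookup('fast') is FakeBuilder
assert registry.lookup('fast', 'html') is FakeBuilder
assert registry.lookup('xml') is None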
lib/bs4/tests/test_dammit.py (new file, 371 lines)
@@ -0,0 +1,371 @@
# encoding: utf-8
import pytest
import logging
import bs4
from bs4 import BeautifulSoup
from bs4.dammit import (
    EntitySubstitution,
    EncodingDetector,
    UnicodeDammit,
)

class TestUnicodeDammit(object):
    """Standalone tests of UnicodeDammit."""

    def test_unicode_input(self):
        markup = "I'm already Unicode! \N{SNOWMAN}"
        dammit = UnicodeDammit(markup)
        assert dammit.unicode_markup == markup

    def test_smart_quotes_to_unicode(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup)
        assert dammit.unicode_markup == "<foo>\u2018\u2019\u201c\u201d</foo>"

    def test_smart_quotes_to_xml_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="xml")
        assert dammit.unicode_markup == "<foo>&#x2018;&#x2019;&#x201C;&#x201D;</foo>"

    def test_smart_quotes_to_html_entities(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="html")
        assert dammit.unicode_markup == "<foo>&lsquo;&rsquo;&ldquo;&rdquo;</foo>"

    def test_smart_quotes_to_ascii(self):
        markup = b"<foo>\x91\x92\x93\x94</foo>"
        dammit = UnicodeDammit(markup, smart_quotes_to="ascii")
        assert dammit.unicode_markup == """<foo>''""</foo>"""

    def test_detect_utf8(self):
        utf8 = b"Sacr\xc3\xa9 bleu! \xe2\x98\x83"
        dammit = UnicodeDammit(utf8)
        assert dammit.original_encoding.lower() == 'utf-8'
        assert dammit.unicode_markup == 'Sacr\xe9 bleu! \N{SNOWMAN}'

    def test_convert_hebrew(self):
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(hebrew, ["iso-8859-8"])
        assert dammit.original_encoding.lower() == 'iso-8859-8'
        assert dammit.unicode_markup == '\u05dd\u05d5\u05dc\u05e9'

    def test_dont_see_smart_quotes_where_there_are_none(self):
        utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch"
        dammit = UnicodeDammit(utf_8)
        assert dammit.original_encoding.lower() == 'utf-8'
        assert dammit.unicode_markup.encode("utf-8") == utf_8

    def test_ignore_inappropriate_codecs(self):
        utf8_data = "Räksmörgås".encode("utf-8")
        dammit = UnicodeDammit(utf8_data, ["iso-8859-8"])
        assert dammit.original_encoding.lower() == 'utf-8'

    def test_ignore_invalid_codecs(self):
        utf8_data = "Räksmörgås".encode("utf-8")
        for bad_encoding in ['.utf8', '...', 'utF---16.!']:
            dammit = UnicodeDammit(utf8_data, [bad_encoding])
            assert dammit.original_encoding.lower() == 'utf-8'

    def test_exclude_encodings(self):
        # This is UTF-8.
        utf8_data = "Räksmörgås".encode("utf-8")

        # But if we exclude UTF-8 from consideration, the guess is
        # Windows-1252.
        dammit = UnicodeDammit(utf8_data, exclude_encodings=["utf-8"])
        assert dammit.original_encoding.lower() == 'windows-1252'

        # And if we exclude that, there is no valid guess at all.
        dammit = UnicodeDammit(
            utf8_data, exclude_encodings=["utf-8", "windows-1252"])
        assert dammit.original_encoding == None
class TestEncodingDetector(object):

    def test_encoding_detector_replaces_junk_in_encoding_name_with_replacement_character(self):
        detected = EncodingDetector(
            b'<?xml version="1.0" encoding="UTF-\xdb" ?>')
        encodings = list(detected.encodings)
        assert 'utf-\N{REPLACEMENT CHARACTER}' in encodings

    def test_detect_html5_style_meta_tag(self):

        for data in (
            b'<html><meta charset="euc-jp" /></html>',
            b"<html><meta charset='euc-jp' /></html>",
            b"<html><meta charset=euc-jp /></html>",
            b"<html><meta charset=euc-jp/></html>"):
            dammit = UnicodeDammit(data, is_html=True)
            assert "euc-jp" == dammit.original_encoding

    def test_last_ditch_entity_replacement(self):
        # This is a UTF-8 document that contains bytestrings
        # completely incompatible with UTF-8 (ie. encoded with some other
        # encoding).
        #
        # Since there is no consistent encoding for the document,
        # Unicode, Dammit will eventually encode the document as UTF-8
        # and encode the incompatible characters as REPLACEMENT
        # CHARACTER.
        #
        # If chardet is installed, it will detect that the document
        # can be converted into ISO-8859-1 without errors. This happens
        # to be the wrong encoding, but it is a consistent encoding, so the
        # code we're testing here won't run.
        #
        # So we temporarily disable chardet if it's present.
        doc = b"""\357\273\277<?xml version="1.0" encoding="UTF-8"?>
<html><b>\330\250\330\252\330\261</b>
<i>\310\322\321\220\312\321\355\344</i></html>"""
        chardet = bs4.dammit.chardet_dammit
        logging.disable(logging.WARNING)
        try:
            def noop(str):
                return None
            bs4.dammit.chardet_dammit = noop
            dammit = UnicodeDammit(doc)
            assert True == dammit.contains_replacement_characters
            assert "\ufffd" in dammit.unicode_markup

            soup = BeautifulSoup(doc, "html.parser")
            assert soup.contains_replacement_characters
        finally:
            logging.disable(logging.NOTSET)
            bs4.dammit.chardet_dammit = chardet

    def test_byte_order_mark_removed(self):
        # A document written in UTF-16LE will have its byte order marker stripped.
        data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
        dammit = UnicodeDammit(data)
        assert "<a>áé</a>" == dammit.unicode_markup
        assert "utf-16le" == dammit.original_encoding

    def test_known_definite_versus_user_encodings(self):
        # The known_definite_encodings are used before sniffing the
        # byte-order mark; the user_encodings are used afterwards.

        # Here's a document in UTF-16LE.
        data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00'
        dammit = UnicodeDammit(data)

        # We can process it as UTF-16 by passing it in as a known
        # definite encoding.
        before = UnicodeDammit(data, known_definite_encodings=["utf-16"])
        assert "utf-16" == before.original_encoding

        # If we pass UTF-8 as a user encoding, it's not even
        # tried--the encoding sniffed from the byte-order mark takes
        # precedence.
        after = UnicodeDammit(data, user_encodings=["utf-8"])
        assert "utf-16le" == after.original_encoding
        assert ["utf-16le"] == [x[0] for x in dammit.tried_encodings]

        # Here's a document in ISO-8859-8.
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(hebrew, known_definite_encodings=["utf-8"],
                               user_encodings=["iso-8859-8"])

        # The known_definite_encodings don't work, BOM sniffing does
        # nothing (it only works for a few UTF encodings), but one of
        # the user_encodings does work.
        assert "iso-8859-8" == dammit.original_encoding
        assert ["utf-8", "iso-8859-8"] == [x[0] for x in dammit.tried_encodings]

    def test_deprecated_override_encodings(self):
        # override_encodings is a deprecated alias for
        # known_definite_encodings.
        hebrew = b"\xed\xe5\xec\xf9"
        dammit = UnicodeDammit(
            hebrew,
            known_definite_encodings=["shift-jis"],
            override_encodings=["utf-8"],
            user_encodings=["iso-8859-8"],
        )
        assert "iso-8859-8" == dammit.original_encoding

        # known_definite_encodings and override_encodings were tried
        # before user_encodings.
        assert ["shift-jis", "utf-8", "iso-8859-8"] == (
            [x[0] for x in dammit.tried_encodings]
        )
    def test_detwingle(self):
        # Here's a UTF8 document.
        utf8 = ("\N{SNOWMAN}" * 3).encode("utf8")

        # Here's a Windows-1252 document.
        windows_1252 = (
            "\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!"
            "\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252")

        # Through some unholy alchemy, they've been stuck together.
        doc = utf8 + windows_1252 + utf8

        # The document can't be turned into UTF-8:
        with pytest.raises(UnicodeDecodeError):
            doc.decode("utf8")

        # Unicode, Dammit thinks the whole document is Windows-1252,
        # and decodes it into "☃☃☃“Hi, I like Windows!”☃☃☃"

        # But if we run it through fix_embedded_windows_1252, it's fixed:
        fixed = UnicodeDammit.detwingle(doc)
        assert "☃☃☃“Hi, I like Windows!”☃☃☃" == fixed.decode("utf8")

    def test_detwingle_ignores_multibyte_characters(self):
        # Each of these characters has a UTF-8 representation ending
        # in \x93. \x93 is a smart quote if interpreted as
        # Windows-1252. But our code knows to skip over multibyte
        # UTF-8 characters, so they'll survive the process unscathed.
        for tricky_unicode_char in (
            "\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93'
            "\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93'
            "\xf0\x90\x90\x93", # This is a CJK character, not sure which one.
        ):
            input = tricky_unicode_char.encode("utf8")
            assert input.endswith(b'\x93')
            output = UnicodeDammit.detwingle(input)
            assert output == input
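[Illustrative sketch, not part of the commit: UnicodeDammit.detwingle() on a mixed UTF-8/Windows-1252 bytestring, as exercised by the two tests above.]

from bs4.dammit import UnicodeDammit

utf8 = "\N{SNOWMAN}".encode("utf8")
windows_1252 = "\N{LEFT DOUBLE QUOTATION MARK}Hi!\N{RIGHT DOUBLE QUOTATION MARK}".encode("windows-1252")

# detwingle() re-encodes the embedded Windows-1252 bytes as UTF-8, so the
# combined document can then be decoded cleanly.
fixed = UnicodeDammit.detwingle(utf8 + windows_1252 + utf8)
assert fixed.decode("utf8") == "\N{SNOWMAN}\u201cHi!\u201d\N{SNOWMAN}"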
    def test_find_declared_encoding(self):
        # Test our ability to find a declared encoding inside an
        # XML or HTML document.
        #
        # Even if the document comes in as Unicode, it may be
        # interesting to know what encoding was claimed
        # originally.

        html_unicode = '<html><head><meta charset="utf-8"></head></html>'
        html_bytes = html_unicode.encode("ascii")

        xml_unicode = '<?xml version="1.0" encoding="ISO-8859-1" ?>'
        xml_bytes = xml_unicode.encode("ascii")

        m = EncodingDetector.find_declared_encoding
        assert m(html_unicode, is_html=False) is None
        assert "utf-8" == m(html_unicode, is_html=True)
        assert "utf-8" == m(html_bytes, is_html=True)

        assert "iso-8859-1" == m(xml_unicode)
        assert "iso-8859-1" == m(xml_bytes)

        # Normally, only the first few kilobytes of a document are checked for
        # an encoding.
        spacer = b' ' * 5000
        assert m(spacer + html_bytes) is None
        assert m(spacer + xml_bytes) is None

        # But you can tell find_declared_encoding to search an entire
        # HTML document.
        assert (
            m(spacer + html_bytes, is_html=True, search_entire_document=True)
            == "utf-8"
        )

        # The XML encoding declaration has to be the very first thing
        # in the document. We'll allow whitespace before the document
        # starts, but nothing else.
        assert m(xml_bytes, search_entire_document=True) == "iso-8859-1"
        assert m(b' ' + xml_bytes, search_entire_document=True) == "iso-8859-1"
        assert m(b'a' + xml_bytes, search_entire_document=True) is None

class TestEntitySubstitution(object):
    """Standalone tests of the EntitySubstitution class."""
    def setup_method(self):
        self.sub = EntitySubstitution

    def test_simple_html_substitution(self):
        # Unicode characters corresponding to named HTML entites
        # are substituted, and no others.
        s = "foo\u2200\N{SNOWMAN}\u00f5bar"
        assert self.sub.substitute_html(s) == "foo&forall;\N{SNOWMAN}&otilde;bar"

    def test_smart_quote_substitution(self):
        # MS smart quotes are a common source of frustration, so we
        # give them a special test.
        quotes = b"\x91\x92foo\x93\x94"
        dammit = UnicodeDammit(quotes)
        assert self.sub.substitute_html(dammit.markup) == "&lsquo;&rsquo;foo&ldquo;&rdquo;"

    def test_html5_entity(self):
        # Some HTML5 entities correspond to single- or multi-character
        # Unicode sequences.

        for entity, u in (
            # A few spot checks of our ability to recognize
            # special character sequences and convert them
            # to named entities.
            ('&models;', '\u22a7'),
            ('&Nfr;', '\U0001d511'),
            ('&ngeqq;', '\u2267\u0338'),
            ('&not;', '\xac'),
            ('&Not;', '\u2aec'),

            # We _could_ convert | to &verbarr;, but we don't, because
            # | is an ASCII character.
            ('|' '|'),

            # Similarly for the fj ligature, which we could convert to
            # &fjlig;, but we don't.
            ("fj", "fj"),

            # We do convert _these_ ASCII characters to HTML entities,
            # because that's required to generate valid HTML.
            ('&gt;', '>'),
            ('&lt;', '<'),
            ('&amp;', '&'),
        ):
            template = '3 %s 4'
            raw = template % u
            with_entities = template % entity
            assert self.sub.substitute_html(raw) == with_entities

    def test_html5_entity_with_variation_selector(self):
        # Some HTML5 entities correspond either to a single-character
        # Unicode sequence _or_ to the same character plus U+FE00,
        # VARIATION SELECTOR 1. We can handle this.
        data = "fjords \u2294 penguins"
        markup = "fjords &sqcup; penguins"
        assert self.sub.substitute_html(data) == markup

        data = "fjords \u2294\ufe00 penguins"
        markup = "fjords &sqcups; penguins"
        assert self.sub.substitute_html(data) == markup

    def test_xml_converstion_includes_no_quotes_if_make_quoted_attribute_is_false(self):
        s = 'Welcome to "my bar"'
        assert self.sub.substitute_xml(s, False) == s

    def test_xml_attribute_quoting_normally_uses_double_quotes(self):
        assert self.sub.substitute_xml("Welcome", True) == '"Welcome"'
        assert self.sub.substitute_xml("Bob's Bar", True) == '"Bob\'s Bar"'

    def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self):
        s = 'Welcome to "my bar"'
        assert self.sub.substitute_xml(s, True) == "'Welcome to \"my bar\"'"

    def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self):
        s = 'Welcome to "Bob\'s Bar"'
        assert self.sub.substitute_xml(s, True) == '"Welcome to &quot;Bob\'s Bar&quot;"'

    def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self):
        quoted = 'Welcome to "Bob\'s Bar"'
        assert self.sub.substitute_xml(quoted) == quoted

    def test_xml_quoting_handles_angle_brackets(self):
        assert self.sub.substitute_xml("foo<bar>") == "foo&lt;bar&gt;"

    def test_xml_quoting_handles_ampersands(self):
        assert self.sub.substitute_xml("AT&T") == "AT&amp;T"

    def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self):
        assert self.sub.substitute_xml("&Aacute;T&T") == "&amp;Aacute;T&amp;T"

    def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self):
        assert self.sub.substitute_xml_containing_entities("&Aacute;T&T") == "&Aacute;T&amp;T"

    def test_quotes_not_html_substituted(self):
        """There's no need to do this except inside attribute values."""
        text = 'Bob\'s "bar"'
        assert self.sub.substitute_html(text) == text
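[Illustrative sketch, not part of the commit: the two XML substitution entry points contrasted, as tested above.]

from bs4.dammit import EntitySubstitution

# substitute_xml() escapes every ampersand, even inside an existing entity...
assert EntitySubstitution.substitute_xml("&Aacute;T&T") == "&amp;Aacute;T&amp;T"

# ...while substitute_xml_containing_entities() leaves recognized entities alone.
assert EntitySubstitution.substitute_xml_containing_entities(
    "&Aacute;T&T") == "&Aacute;T&amp;T"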
lib/bs4/tests/test_docs.py (new file, 38 lines)
@@ -0,0 +1,38 @@
"Test harness for doctests."

# TODO: Pretty sure this isn't used and should be deleted.

# pylint: disable-msg=E0611,W0142

__metaclass__ = type
__all__ = [
    'additional_tests',
    ]

import atexit
import doctest
import os
#from pkg_resources import (
#    resource_filename, resource_exists, resource_listdir, cleanup_resources)
import unittest

DOCTEST_FLAGS = (
    doctest.ELLIPSIS |
    doctest.NORMALIZE_WHITESPACE |
    doctest.REPORT_NDIFF)


# def additional_tests():
#     "Run the doc tests (README.txt and docs/*, if any exist)"
#     doctest_files = [
#         os.path.abspath(resource_filename('bs4', 'README.txt'))]
#     if resource_exists('bs4', 'docs'):
#         for name in resource_listdir('bs4', 'docs'):
#             if name.endswith('.txt'):
#                 doctest_files.append(
#                     os.path.abspath(
#                         resource_filename('bs4', 'docs/%s' % name)))
#     kwargs = dict(module_relative=False, optionflags=DOCTEST_FLAGS)
#     atexit.register(cleanup_resources)
#     return unittest.TestSuite((
#         doctest.DocFileSuite(*doctest_files, **kwargs)))
74 lib/bs4/tests/test_element.py Normal file
@@ -0,0 +1,74 @@
"""Tests of classes in element.py.

The really big classes -- Tag, PageElement, and NavigableString --
are tested in separate files.
"""

from bs4.element import (
    CharsetMetaAttributeValue,
    ContentMetaAttributeValue,
    NamespacedAttribute,
)
from . import SoupTest


class TestNamedspacedAttribute(object):

    def test_name_may_be_none_or_missing(self):
        a = NamespacedAttribute("xmlns", None)
        assert a == "xmlns"

        a = NamespacedAttribute("xmlns", "")
        assert a == "xmlns"

        a = NamespacedAttribute("xmlns")
        assert a == "xmlns"

    def test_namespace_may_be_none_or_missing(self):
        a = NamespacedAttribute(None, "tag")
        assert a == "tag"

        a = NamespacedAttribute("", "tag")
        assert a == "tag"

    def test_attribute_is_equivalent_to_colon_separated_string(self):
        a = NamespacedAttribute("a", "b")
        assert "a:b" == a

    def test_attributes_are_equivalent_if_prefix_and_name_identical(self):
        a = NamespacedAttribute("a", "b", "c")
        b = NamespacedAttribute("a", "b", "c")
        assert a == b

        # The actual namespace is not considered.
        c = NamespacedAttribute("a", "b", None)
        assert a == c

        # But name and prefix are important.
        d = NamespacedAttribute("a", "z", "c")
        assert a != d

        e = NamespacedAttribute("z", "b", "c")
        assert a != e


class TestAttributeValueWithCharsetSubstitution(object):
    """Certain attributes are designed to have the charset of the
    final document substituted into their value.
    """

    def test_charset_meta_attribute_value(self):
        # The value of a CharsetMetaAttributeValue is whatever
        # encoding the string is in.
        value = CharsetMetaAttributeValue("euc-jp")
        assert "euc-jp" == value
        assert "euc-jp" == value.original_value
        assert "utf8" == value.encode("utf8")
        assert "ascii" == value.encode("ascii")

    def test_content_meta_attribute_value(self):
        value = ContentMetaAttributeValue("text/html; charset=euc-jp")
        assert "text/html; charset=euc-jp" == value
        assert "text/html; charset=euc-jp" == value.original_value
        assert "text/html; charset=utf8" == value.encode("utf8")
        assert "text/html; charset=ascii" == value.encode("ascii")
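These tests hinge on NamespacedAttribute being a string subclass whose string identity is "prefix:name" while the namespace URI rides along as metadata. A short illustrative sketch (values are ours, not from the commit):

# Illustrative sketch, not part of the commit.
from bs4.element import NamespacedAttribute

attr = NamespacedAttribute("xlink", "href", "http://www.w3.org/1999/xlink")
print(attr)            # "xlink:href" -- behaves as a plain string
print(attr.namespace)  # the namespace URI is kept as an attribute
assert attr == "xlink:href"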
113 lib/bs4/tests/test_formatter.py Normal file
@@ -0,0 +1,113 @@
import pytest

from bs4.element import Tag
from bs4.formatter import (
    Formatter,
    HTMLFormatter,
    XMLFormatter,
)
from . import SoupTest

class TestFormatter(SoupTest):

    def test_default_attributes(self):
        # Test the default behavior of Formatter.attributes().
        formatter = Formatter()
        tag = Tag(name="tag")
        tag['b'] = 1
        tag['a'] = 2

        # Attributes come out sorted by name. In Python 3, attributes
        # normally come out of a dictionary in the order they were
        # added.
        assert [('a', 2), ('b', 1)] == formatter.attributes(tag)

        # This works even if Tag.attrs is None, though this shouldn't
        # normally happen.
        tag.attrs = None
        assert [] == formatter.attributes(tag)

        assert ' ' == formatter.indent

    def test_sort_attributes(self):
        # Test the ability to override Formatter.attributes() to,
        # e.g., disable the normal sorting of attributes.
        class UnsortedFormatter(Formatter):
            def attributes(self, tag):
                self.called_with = tag
                for k, v in sorted(tag.attrs.items()):
                    if k == 'ignore':
                        continue
                    yield k, v

        soup = self.soup('<p cval="1" aval="2" ignore="ignored"></p>')
        formatter = UnsortedFormatter()
        decoded = soup.decode(formatter=formatter)

        # attributes() was called on the <p> tag. It filtered out one
        # attribute and sorted the other two.
        assert formatter.called_with == soup.p
        assert '<p aval="2" cval="1"></p>' == decoded

    def test_empty_attributes_are_booleans(self):
        # Test the behavior of empty_attributes_are_booleans as well
        # as which Formatters have it enabled.

        for name in ('html', 'minimal', None):
            formatter = HTMLFormatter.REGISTRY[name]
            assert False == formatter.empty_attributes_are_booleans

        formatter = XMLFormatter.REGISTRY[None]
        assert False == formatter.empty_attributes_are_booleans

        formatter = HTMLFormatter.REGISTRY['html5']
        assert True == formatter.empty_attributes_are_booleans

        # Verify that the constructor sets the value.
        formatter = Formatter(empty_attributes_are_booleans=True)
        assert True == formatter.empty_attributes_are_booleans

        # Now demonstrate what it does to markup.
        for markup in (
                "<option selected></option>",
                '<option selected=""></option>'
        ):
            soup = self.soup(markup)
            for formatter in ('html', 'minimal', 'xml', None):
                assert b'<option selected=""></option>' == soup.option.encode(formatter='html')
                assert b'<option selected></option>' == soup.option.encode(formatter='html5')

    @pytest.mark.parametrize(
        "indent,expect",
        [
            (None, '<a>\n<b>\ntext\n</b>\n</a>'),
            (-1, '<a>\n<b>\ntext\n</b>\n</a>'),
            (0, '<a>\n<b>\ntext\n</b>\n</a>'),
            ("", '<a>\n<b>\ntext\n</b>\n</a>'),

            (1, '<a>\n <b>\n  text\n </b>\n</a>'),
            (2, '<a>\n  <b>\n    text\n  </b>\n</a>'),

            ("\t", '<a>\n\t<b>\n\t\ttext\n\t</b>\n</a>'),
            ('abc', '<a>\nabc<b>\nabcabctext\nabc</b>\n</a>'),

            # Some invalid inputs -- the default behavior is used.
            (object(), '<a>\n <b>\n  text\n </b>\n</a>'),
            (b'bytes', '<a>\n <b>\n  text\n </b>\n</a>'),
        ]
    )
    def test_indent(self, indent, expect):
        # Pretty-print a tree with a Formatter set to
        # indent in a certain way and verify the results.
        soup = self.soup("<a><b>text</b></a>")
        formatter = Formatter(indent=indent)
        assert soup.prettify(formatter=formatter) == expect

        # Pretty-printing only happens with prettify(), not
        # encode().
        assert soup.encode(formatter=formatter) != expect

    def test_default_indent_value(self):
        formatter = Formatter()
        assert formatter.indent == ' '
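The constructor options exercised above can be combined on one formatter object. A hedged sketch of typical usage (the markup is ours, not from the commit):

# Illustrative sketch, not part of the commit.
from bs4 import BeautifulSoup
from bs4.formatter import HTMLFormatter

soup = BeautifulSoup('<option selected=""></option>', 'html.parser')

# indent and empty_attributes_are_booleans are the two constructor
# options the tests above exercise.
formatter = HTMLFormatter(indent="\t", empty_attributes_are_booleans=True)
print(soup.prettify(formatter=formatter))  # tab-indented, with bare "selected"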
223 lib/bs4/tests/test_html5lib.py Normal file
@@ -0,0 +1,223 @@
"""Tests to ensure that the html5lib tree builder generates good trees."""

import warnings

try:
    from bs4.builder import HTML5TreeBuilder
    HTML5LIB_PRESENT = True
except ImportError as e:
    HTML5LIB_PRESENT = False
from bs4.element import SoupStrainer
from . import (
    HTML5TreeBuilderSmokeTest,
    SoupTest,
    skipIf,
)

@skipIf(
    not HTML5LIB_PRESENT,
    "html5lib seems not to be present, not testing its tree builder.")
class TestHTML5LibBuilder(SoupTest, HTML5TreeBuilderSmokeTest):
    """See ``HTML5TreeBuilderSmokeTest``."""

    @property
    def default_builder(self):
        return HTML5TreeBuilder

    def test_soupstrainer(self):
        # The html5lib tree builder does not support SoupStrainers.
        strainer = SoupStrainer("b")
        markup = "<p>A <b>bold</b> statement.</p>"
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup(markup, parse_only=strainer)
        assert soup.decode() == self.document_for(markup)

        assert "the html5lib tree builder doesn't support parse_only" in str(w[0].message)

    def test_correctly_nested_tables(self):
        """html5lib inserts <tbody> tags where other parsers don't."""
        markup = ('<table id="1">'
                  '<tr>'
                  "<td>Here's another table:"
                  '<table id="2">'
                  '<tr><td>foo</td></tr>'
                  '</table></td>')

        self.assert_soup(
            markup,
            '<table id="1"><tbody><tr><td>Here\'s another table:'
            '<table id="2"><tbody><tr><td>foo</td></tr></tbody></table>'
            '</td></tr></tbody></table>')

        self.assert_soup(
            "<table><thead><tr><td>Foo</td></tr></thead>"
            "<tbody><tr><td>Bar</td></tr></tbody>"
            "<tfoot><tr><td>Baz</td></tr></tfoot></table>")

    def test_xml_declaration_followed_by_doctype(self):
        markup = '''<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<p>foo</p>
</body>
</html>'''
        soup = self.soup(markup)
        # Verify that we can reach the <p> tag; this means the tree is connected.
        assert b"<p>foo</p>" == soup.p.encode()

    def test_reparented_markup(self):
        markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>'
        soup = self.soup(markup)
        assert "<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p></body>" == soup.body.decode()
        assert 2 == len(soup.find_all('p'))

    def test_reparented_markup_ends_with_whitespace(self):
        markup = '<p><em>foo</p>\n<p>bar<a></a></em></p>\n'
        soup = self.soup(markup)
        assert "<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>" == soup.body.decode()
        assert 2 == len(soup.find_all('p'))

    def test_reparented_markup_containing_identical_whitespace_nodes(self):
        """Verify that we keep the two whitespace nodes in this
        document distinct when reparenting the adjacent <tbody> tags.
        """
        markup = '<table> <tbody><tbody><ims></tbody> </table>'
        soup = self.soup(markup)
        space1, space2 = soup.find_all(string=' ')
        tbody1, tbody2 = soup.find_all('tbody')
        assert space1.next_element is tbody1
        assert tbody2.next_element is space2

    def test_reparented_markup_containing_children(self):
        markup = '<div><a>aftermath<p><noscript>target</noscript>aftermath</a></p></div>'
        soup = self.soup(markup)
        noscript = soup.noscript
        assert "target" == noscript.next_element
        target = soup.find(string='target')

        # The 'aftermath' string was duplicated; we want the second one.
        final_aftermath = soup.find_all(string='aftermath')[-1]

        # The <noscript> tag was moved beneath a copy of the <a> tag,
        # but the 'target' string within is still connected to the
        # (second) 'aftermath' string.
        assert final_aftermath == target.next_element
        assert target == final_aftermath.previous_element

    def test_processing_instruction(self):
        """Processing instructions become comments."""
        markup = b"""<?PITarget PIContent?>"""
        soup = self.soup(markup)
        assert str(soup).startswith("<!--?PITarget PIContent?-->")

    def test_cloned_multivalue_node(self):
        markup = b"""<a class="my_class"><p></a>"""
        soup = self.soup(markup)
        a1, a2 = soup.find_all('a')
        assert a1 == a2
        assert a1 is not a2

    def test_foster_parenting(self):
        markup = b"""<table><td></tbody>A"""
        soup = self.soup(markup)
        assert "<body>A<table><tbody><tr><td></td></tr></tbody></table></body>" == soup.body.decode()

    def test_extraction(self):
        """
        Test that extraction does not destroy the tree.

        https://bugs.launchpad.net/beautifulsoup/+bug/1782928
        """

        markup = """
<html><head></head>
<style>
</style><script></script><body><p>hello</p></body></html>
"""
        soup = self.soup(markup)
        [s.extract() for s in soup('script')]
        [s.extract() for s in soup('style')]

        assert len(soup.find_all("p")) == 1

    def test_empty_comment(self):
        """
        Test that empty comment does not break structure.

        https://bugs.launchpad.net/beautifulsoup/+bug/1806598
        """

        markup = """
<html>
<body>
<form>
<!----><input type="text">
</form>
</body>
</html>
"""
        soup = self.soup(markup)
        inputs = []
        for form in soup.find_all('form'):
            inputs.extend(form.find_all('input'))
        assert len(inputs) == 1

    def test_tracking_line_numbers(self):
        # The html5lib TreeBuilder keeps track of line number and
        # position of each element.
        markup = "\n   <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
        soup = self.soup(markup)
        assert 2 == soup.p.sourceline
        assert 5 == soup.p.sourcepos
        assert "sourceline" == soup.p.find('sourceline').name

        # You can deactivate this behavior.
        soup = self.soup(markup, store_line_numbers=False)
        assert "sourceline" == soup.p.sourceline.name
        assert "sourcepos" == soup.p.sourcepos.name

    def test_special_string_containers(self):
        # The html5lib tree builder doesn't support this standard feature,
        # because there's no way of knowing, when a string is created,
        # where in the tree it will eventually end up.
        pass

    def test_html5_attributes(self):
        # The html5lib TreeBuilder can convert any entity named in
        # the HTML5 spec to a sequence of Unicode characters, and
        # convert those Unicode characters to a (potentially
        # different) named entity on the way out.
        #
        # This is a copy of the same test from
        # HTMLParserTreeBuilderSmokeTest. It's not in the superclass
        # because the lxml HTML TreeBuilder _doesn't_ work this way.
        for input_element, output_unicode, output_element in (
                ("⇄", '\u21c4', b'⇄'),
                ('⊧', '\u22a7', b'⊧'),
                ('𝔑', '\U0001d511', b'𝔑'),
                ('≧̸', '\u2267\u0338', b'≧̸'),
                ('¬', '\xac', b'¬'),
                ('⫬', '\u2aec', b'⫬'),
                ('"', '"', b'"'),
                ('∴', '\u2234', b'∴'),
                ('∴', '\u2234', b'∴'),
                ('∴', '\u2234', b'∴'),
                ("fj", 'fj', b'fj'),
                ("⊔", '\u2294', b'⊔'),
                ("⊔︀", '\u2294\ufe00', b'⊔︀'),
                ("'", "'", b"'"),
                ("|", "|", b"|"),
        ):
            markup = '<div>%s</div>' % input_element
            div = self.soup(markup).div
            without_element = div.encode()
            expect = b"<div>%s</div>" % output_unicode.encode("utf8")
            assert without_element == expect

            with_element = div.encode(formatter="html")
            expect = b"<div>%s</div>" % output_element
            assert with_element == expect
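The tbody-insertion behavior tested above is the main observable difference between html5lib and html.parser. A small illustrative comparison, assuming the optional html5lib package is installed (not part of the commit):

# Illustrative sketch, not part of the commit; requires html5lib.
from bs4 import BeautifulSoup

markup = '<table id="1"><tr><td>foo</td></tr></table>'
# html5lib repairs markup the way a browser would, inserting <tbody>:
print(BeautifulSoup(markup, 'html5lib').table)
# html.parser leaves the tree exactly as written:
print(BeautifulSoup(markup, 'html.parser').table)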
136 lib/bs4/tests/test_htmlparser.py Normal file
@@ -0,0 +1,136 @@
"""Tests to ensure that the html.parser tree builder generates good
trees."""

from pdb import set_trace
import pickle
import warnings
from bs4.builder import (
    HTMLParserTreeBuilder,
    XMLParsedAsHTMLWarning,
)
from bs4.builder._htmlparser import BeautifulSoupHTMLParser
from . import SoupTest, HTMLTreeBuilderSmokeTest

class TestHTMLParserTreeBuilder(SoupTest, HTMLTreeBuilderSmokeTest):

    default_builder = HTMLParserTreeBuilder

    def test_namespaced_system_doctype(self):
        # html.parser can't handle namespaced doctypes, so skip this one.
        pass

    def test_namespaced_public_doctype(self):
        # html.parser can't handle namespaced doctypes, so skip this one.
        pass

    def test_builder_is_pickled(self):
        """Unlike most tree builders, HTMLParserTreeBuilder will
        be restored after pickling.
        """
        tree = self.soup("<a><b>foo</a>")
        dumped = pickle.dumps(tree, 2)
        loaded = pickle.loads(dumped)
        assert isinstance(loaded.builder, type(tree.builder))

    def test_redundant_empty_element_closing_tags(self):
        self.assert_soup('<br></br><br></br><br></br>', "<br/><br/><br/>")
        self.assert_soup('</br></br></br>', "")

    def test_empty_element(self):
        # This verifies that any buffered data present when the parser
        # finishes working is handled.
        self.assert_soup("foo &# bar", "foo &amp;# bar")

    def test_tracking_line_numbers(self):
        # The html.parser TreeBuilder keeps track of line number and
        # position of each element.
        markup = "\n   <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>"
        soup = self.soup(markup)
        assert 2 == soup.p.sourceline
        assert 3 == soup.p.sourcepos
        assert "sourceline" == soup.p.find('sourceline').name

        # You can deactivate this behavior.
        soup = self.soup(markup, store_line_numbers=False)
        assert "sourceline" == soup.p.sourceline.name
        assert "sourcepos" == soup.p.sourcepos.name

    def test_on_duplicate_attribute(self):
        # The html.parser tree builder has a variety of ways of
        # handling a tag that contains the same attribute multiple times.

        markup = '<a class="cls" href="url1" href="url2" href="url3" id="id">'

        # If you don't provide any particular value for
        # on_duplicate_attribute, later values replace earlier values.
        soup = self.soup(markup)
        assert "url3" == soup.a['href']
        assert ["cls"] == soup.a['class']
        assert "id" == soup.a['id']

        # You can also get this behavior explicitly.
        def assert_attribute(on_duplicate_attribute, expected):
            soup = self.soup(
                markup, on_duplicate_attribute=on_duplicate_attribute
            )
            assert expected == soup.a['href']

            # Verify that non-duplicate attributes are treated normally.
            assert ["cls"] == soup.a['class']
            assert "id" == soup.a['id']
        assert_attribute(None, "url3")
        assert_attribute(BeautifulSoupHTMLParser.REPLACE, "url3")

        # You can ignore subsequent values in favor of the first.
        assert_attribute(BeautifulSoupHTMLParser.IGNORE, "url1")

        # And you can pass in a callable that does whatever you want.
        def accumulate(attrs, key, value):
            if not isinstance(attrs[key], list):
                attrs[key] = [attrs[key]]
            attrs[key].append(value)
        assert_attribute(accumulate, ["url1", "url2", "url3"])

    def test_html5_attributes(self):
        # The html.parser TreeBuilder can convert any entity named in
        # the HTML5 spec to a sequence of Unicode characters, and
        # convert those Unicode characters to a (potentially
        # different) named entity on the way out.
        for input_element, output_unicode, output_element in (
                ("⇄", '\u21c4', b'⇄'),
                ('⊧', '\u22a7', b'⊧'),
                ('𝔑', '\U0001d511', b'𝔑'),
                ('≧̸', '\u2267\u0338', b'≧̸'),
                ('¬', '\xac', b'¬'),
                ('⫬', '\u2aec', b'⫬'),
                ('"', '"', b'"'),
                ('∴', '\u2234', b'∴'),
                ('∴', '\u2234', b'∴'),
                ('∴', '\u2234', b'∴'),
                ("fj", 'fj', b'fj'),
                ("⊔", '\u2294', b'⊔'),
                ("⊔︀", '\u2294\ufe00', b'⊔︀'),
                ("'", "'", b"'"),
                ("|", "|", b"|"),
        ):
            markup = '<div>%s</div>' % input_element
            div = self.soup(markup).div
            without_element = div.encode()
            expect = b"<div>%s</div>" % output_unicode.encode("utf8")
            assert without_element == expect

            with_element = div.encode(formatter="html")
            expect = b"<div>%s</div>" % output_element
            assert with_element == expect

class TestHTMLParserSubclass(SoupTest):
    def test_error(self):
        """Verify that our HTMLParser subclass implements error() in a way
        that doesn't cause a crash.
        """
        parser = BeautifulSoupHTMLParser()
        with warnings.catch_warnings(record=True) as warns:
            parser.error("don't crash")
        [warning] = warns
        assert "don't crash" == str(warning.message)
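The on_duplicate_attribute hook tested above is also reachable through plain string values. A short illustrative sketch (the markup is ours, not from the commit):

# Illustrative sketch, not part of the commit.
from bs4 import BeautifulSoup

markup = '<a href="url1" href="url2">'
# Default behavior ('replace'): the last value wins.
print(BeautifulSoup(markup, 'html.parser').a['href'])  # url2
# 'ignore': the first value wins.
print(BeautifulSoup(
    markup, 'html.parser', on_duplicate_attribute='ignore').a['href'])  # url1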
199 lib/bs4/tests/test_lxml.py Normal file
@@ -0,0 +1,199 @@
"""Tests to ensure that the lxml tree builder generates good trees."""

import pickle
import re
import warnings

try:
    import lxml.etree
    LXML_PRESENT = True
    LXML_VERSION = lxml.etree.LXML_VERSION
except ImportError as e:
    LXML_PRESENT = False
    LXML_VERSION = (0,)

if LXML_PRESENT:
    from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML

from bs4 import (
    BeautifulSoup,
    BeautifulStoneSoup,
)
from bs4.element import Comment, Doctype, SoupStrainer
from . import (
    HTMLTreeBuilderSmokeTest,
    XMLTreeBuilderSmokeTest,
    SoupTest,
    skipIf,
)

@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its tree builder.")
class TestLXMLTreeBuilder(SoupTest, HTMLTreeBuilderSmokeTest):
    """See ``HTMLTreeBuilderSmokeTest``."""

    @property
    def default_builder(self):
        return LXMLTreeBuilder

    def test_out_of_range_entity(self):
        self.assert_soup(
            "<p>foo�bar</p>", "<p>foobar</p>")
        self.assert_soup(
            "<p>foo�bar</p>", "<p>foobar</p>")
        self.assert_soup(
            "<p>foo�bar</p>", "<p>foobar</p>")

    def test_entities_in_foreign_document_encoding(self):
        # We can't implement this case correctly because by the time we
        # hear about markup like "&#147;", it's been (incorrectly) converted into
        # a string like u'\x93'
        pass

    # In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
    # test if an old version of lxml is installed.

    @skipIf(
        not LXML_PRESENT or LXML_VERSION < (2,3,5,0),
        "Skipping doctype test for old version of lxml to avoid segfault.")
    def test_empty_doctype(self):
        soup = self.soup("<!DOCTYPE>")
        doctype = soup.contents[0]
        assert "" == doctype.strip()

    def test_beautifulstonesoup_is_xml_parser(self):
        # Make sure that the deprecated BSS class uses an xml builder
        # if one is installed.
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulStoneSoup("<b />")
        assert "<b/>" == str(soup.b)
        assert "BeautifulStoneSoup class is deprecated" in str(w[0].message)

    def test_tracking_line_numbers(self):
        # The lxml TreeBuilder cannot keep track of line numbers from
        # the original markup. Even if you ask for line numbers, we
        # don't have 'em.
        #
        # This means that if you have a tag like <sourceline> or
        # <sourcepos>, attribute access will find it rather than
        # giving you a numeric answer.
        soup = self.soup(
            "\n   <p>\n\n<sourceline>\n<b>text</b></sourceline><sourcepos></p>",
            store_line_numbers=True
        )
        assert "sourceline" == soup.p.sourceline.name
        assert "sourcepos" == soup.p.sourcepos.name

@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its XML tree builder.")
class TestLXMLXMLTreeBuilder(SoupTest, XMLTreeBuilderSmokeTest):
    """See ``HTMLTreeBuilderSmokeTest``."""

    @property
    def default_builder(self):
        return LXMLTreeBuilderForXML

    def test_namespace_indexing(self):
        soup = self.soup(
            '<?xml version="1.1"?>\n'
            '<root>'
            '<tag xmlns="http://unprefixed-namespace.com">content</tag>'
            '<prefix:tag2 xmlns:prefix="http://prefixed-namespace.com">content</prefix:tag2>'
            '<prefix2:tag3 xmlns:prefix2="http://another-namespace.com">'
            '<subtag xmlns="http://another-unprefixed-namespace.com">'
            '<subsubtag xmlns="http://yet-another-unprefixed-namespace.com">'
            '</prefix2:tag3>'
            '</root>'
        )

        # The BeautifulSoup object includes every namespace prefix
        # defined in the entire document. This is the default set of
        # namespaces used by soupsieve.
        #
        # Un-prefixed namespaces are not included, and if a given
        # prefix is defined twice, only the first prefix encountered
        # in the document shows up here.
        assert soup._namespaces == {
            'xml': 'http://www.w3.org/XML/1998/namespace',
            'prefix': 'http://prefixed-namespace.com',
            'prefix2': 'http://another-namespace.com'
        }

        # A Tag object includes only the namespace prefixes
        # that were in scope when it was parsed.

        # We do not track un-prefixed namespaces as we can only hold
        # one (the first one), and it will be recognized as the
        # default namespace by soupsieve, even when operating from a
        # tag with a different un-prefixed namespace.
        assert soup.tag._namespaces == {
            'xml': 'http://www.w3.org/XML/1998/namespace',
        }

        assert soup.tag2._namespaces == {
            'prefix': 'http://prefixed-namespace.com',
            'xml': 'http://www.w3.org/XML/1998/namespace',
        }

        assert soup.subtag._namespaces == {
            'prefix2': 'http://another-namespace.com',
            'xml': 'http://www.w3.org/XML/1998/namespace',
        }

        assert soup.subsubtag._namespaces == {
            'prefix2': 'http://another-namespace.com',
            'xml': 'http://www.w3.org/XML/1998/namespace',
        }


    def test_namespace_interaction_with_select_and_find(self):
        # Demonstrate how namespaces interact with select* and
        # find* methods.

        soup = self.soup(
            '<?xml version="1.1"?>\n'
            '<root>'
            '<tag xmlns="http://unprefixed-namespace.com">content</tag>'
            '<prefix:tag2 xmlns:prefix="http://prefixed-namespace.com">content</tag>'
            '<subtag xmlns:prefix="http://another-namespace-same-prefix.com">'
            '<prefix:tag3>'
            '</subtag>'
            '</root>'
        )

        # soupselect uses namespace URIs.
        assert soup.select_one('tag').name == 'tag'
        assert soup.select_one('prefix|tag2').name == 'tag2'

        # If a prefix is declared more than once, only the first usage
        # is registered with the BeautifulSoup object.
        assert soup.select_one('prefix|tag3') is None

        # But you can always explicitly specify a namespace dictionary.
        assert soup.select_one(
            'prefix|tag3', namespaces=soup.subtag._namespaces
        ).name == 'tag3'

        # And a Tag (as opposed to the BeautifulSoup object) will
        # have a set of default namespaces scoped to that Tag.
        assert soup.subtag.select_one('prefix|tag3').name == 'tag3'

        # the find() methods aren't fully namespace-aware; they just
        # look at prefixes.
        assert soup.find('tag').name == 'tag'
        assert soup.find('prefix:tag2').name == 'tag2'
        assert soup.find('prefix:tag3').name == 'tag3'
        assert soup.subtag.find('prefix:tag3').name == 'tag3'

    def test_pickle_removes_builder(self):
        # The lxml TreeBuilder is not picklable, so it won't be
        # preserved in a pickle/unpickle operation.

        soup = self.soup("<a>some markup</a>")
        assert isinstance(soup.builder, self.default_builder)
        pickled = pickle.dumps(soup)
        unpickled = pickle.loads(pickled)
        assert "some markup" == unpickled.a.string
        assert unpickled.builder is None
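The split the tests above document -- select() resolving namespace URIs while find() matches literal prefixed names -- is easy to see on a small document. An illustrative sketch, assuming lxml is installed (the namespace URI and tag names are ours, not from the commit):

# Illustrative sketch, not part of the commit; requires lxml.
from bs4 import BeautifulSoup

xml = ('<root xmlns:p="http://prefixed-namespace.com">'
       '<p:tag>content</p:tag></root>')
soup = BeautifulSoup(xml, 'xml')
# select() resolves the prefix through namespaces collected at parse time.
print(soup.select_one('p|tag').name)  # tag
# find() just matches the literal "prefix:name" string.
print(soup.find('p:tag').name)        # tag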
144
lib/bs4/tests/test_navigablestring.py
Normal file
144
lib/bs4/tests/test_navigablestring.py
Normal file
|
@ -0,0 +1,144 @@
|
|||
import pytest
|
||||
|
||||
from bs4.element import (
|
||||
CData,
|
||||
Comment,
|
||||
Declaration,
|
||||
Doctype,
|
||||
NavigableString,
|
||||
RubyParenthesisString,
|
||||
RubyTextString,
|
||||
Script,
|
||||
Stylesheet,
|
||||
TemplateString,
|
||||
)
|
||||
|
||||
from . import SoupTest
|
||||
|
||||
class TestNavigableString(SoupTest):
|
||||
|
||||
def test_text_acquisition_methods(self):
|
||||
# These methods are intended for use against Tag, but they
|
||||
# work on NavigableString as well,
|
||||
|
||||
s = NavigableString("fee ")
|
||||
cdata = CData("fie ")
|
||||
comment = Comment("foe ")
|
||||
|
||||
assert "fee " == s.get_text()
|
||||
assert "fee" == s.get_text(strip=True)
|
||||
assert ["fee "] == list(s.strings)
|
||||
assert ["fee"] == list(s.stripped_strings)
|
||||
assert ["fee "] == list(s._all_strings())
|
||||
|
||||
assert "fie " == cdata.get_text()
|
||||
assert "fie" == cdata.get_text(strip=True)
|
||||
assert ["fie "] == list(cdata.strings)
|
||||
assert ["fie"] == list(cdata.stripped_strings)
|
||||
assert ["fie "] == list(cdata._all_strings())
|
||||
|
||||
# Since a Comment isn't normally considered 'text',
|
||||
# these methods generally do nothing.
|
||||
assert "" == comment.get_text()
|
||||
assert [] == list(comment.strings)
|
||||
assert [] == list(comment.stripped_strings)
|
||||
assert [] == list(comment._all_strings())
|
||||
|
||||
# Unless you specifically say that comments are okay.
|
||||
assert "foe" == comment.get_text(strip=True, types=Comment)
|
||||
assert "foe " == comment.get_text(types=(Comment, NavigableString))
|
||||
|
||||
def test_string_has_immutable_name_property(self):
|
||||
# string.name is defined as None and can't be modified
|
||||
string = self.soup("s").string
|
||||
assert None == string.name
|
||||
with pytest.raises(AttributeError):
|
||||
string.name = 'foo'
|
||||
|
||||
class TestNavigableStringSubclasses(SoupTest):
|
||||
|
||||
def test_cdata(self):
|
||||
# None of the current builders turn CDATA sections into CData
|
||||
# objects, but you can create them manually.
|
||||
soup = self.soup("")
|
||||
cdata = CData("foo")
|
||||
soup.insert(1, cdata)
|
||||
assert str(soup) == "<![CDATA[foo]]>"
|
||||
assert soup.find(string="foo") == "foo"
|
||||
assert soup.contents[0] == "foo"
|
||||
|
||||
def test_cdata_is_never_formatted(self):
|
||||
"""Text inside a CData object is passed into the formatter.
|
||||
|
||||
But the return value is ignored.
|
||||
"""
|
||||
|
||||
self.count = 0
|
||||
def increment(*args):
|
||||
self.count += 1
|
||||
return "BITTER FAILURE"
|
||||
|
||||
soup = self.soup("")
|
||||
cdata = CData("<><><>")
|
||||
soup.insert(1, cdata)
|
||||
assert b"<![CDATA[<><><>]]>" == soup.encode(formatter=increment)
|
||||
assert 1 == self.count
|
||||
|
||||
def test_doctype_ends_in_newline(self):
|
||||
# Unlike other NavigableString subclasses, a DOCTYPE always ends
|
||||
# in a newline.
|
||||
doctype = Doctype("foo")
|
||||
soup = self.soup("")
|
||||
soup.insert(1, doctype)
|
||||
assert soup.encode() == b"<!DOCTYPE foo>\n"
|
||||
|
||||
def test_declaration(self):
|
||||
d = Declaration("foo")
|
||||
assert "<?foo?>" == d.output_ready()
|
||||
|
||||
def test_default_string_containers(self):
|
||||
# In some cases, we use different NavigableString subclasses for
|
||||
# the same text in different tags.
|
||||
soup = self.soup(
|
||||
"<div>text</div><script>text</script><style>text</style>"
|
||||
)
|
||||
assert [NavigableString, Script, Stylesheet] == [
|
||||
x.__class__ for x in soup.find_all(string=True)
|
||||
]
|
||||
|
||||
# The TemplateString is a little unusual because it's generally found
|
||||
# _inside_ children of a <template> element, not a direct child of the
|
||||
# <template> element.
|
||||
soup = self.soup(
|
||||
"<template>Some text<p>In a tag</p></template>Some text outside"
|
||||
)
|
||||
assert all(
|
||||
isinstance(x, TemplateString)
|
||||
for x in soup.template._all_strings(types=None)
|
||||
)
|
||||
|
||||
# Once the <template> tag closed, we went back to using
|
||||
# NavigableString.
|
||||
outside = soup.template.next_sibling
|
||||
assert isinstance(outside, NavigableString)
|
||||
assert not isinstance(outside, TemplateString)
|
||||
|
||||
# The TemplateString is also unusual because it can contain
|
||||
# NavigableString subclasses of _other_ types, such as
|
||||
# Comment.
|
||||
markup = b"<template>Some text<p>In a tag</p><!--with a comment--></template>"
|
||||
soup = self.soup(markup)
|
||||
assert markup == soup.template.encode("utf8")
|
||||
|
||||
def test_ruby_strings(self):
|
||||
markup = "<ruby>漢 <rp>(</rp><rt>kan</rt><rp>)</rp> 字 <rp>(</rp><rt>ji</rt><rp>)</rp></ruby>"
|
||||
soup = self.soup(markup)
|
||||
assert isinstance(soup.rp.string, RubyParenthesisString)
|
||||
assert isinstance(soup.rt.string, RubyTextString)
|
||||
|
||||
# Just as a demo, here's what this means for get_text usage.
|
||||
assert "漢字" == soup.get_text(strip=True)
|
||||
assert "漢(kan)字(ji)" == soup.get_text(
|
||||
strip=True,
|
||||
types=(NavigableString, RubyTextString, RubyParenthesisString)
|
||||
)
|
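The types argument shown in these tests is the general mechanism for widening or narrowing what counts as "text". A short illustrative sketch (the markup is ours, not from the commit):

# Illustrative sketch, not part of the commit.
from bs4 import BeautifulSoup
from bs4.element import Comment, NavigableString

soup = BeautifulSoup('<p>text<!--comment--></p>', 'html.parser')
# Comments are skipped by default...
print(soup.get_text())                                  # text
# ...but the types argument widens what counts as text.
print(soup.get_text(types=(NavigableString, Comment)))  # textcomment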
751 lib/bs4/tests/test_pageelement.py Normal file
@@ -0,0 +1,751 @@
"""Tests of the bs4.element.PageElement class"""
import copy
import pickle
import pytest

from soupsieve import SelectorSyntaxError

from bs4 import BeautifulSoup
from bs4.element import (
    Comment,
    SoupStrainer,
)
from . import SoupTest


class TestEncoding(SoupTest):
    """Test the ability to encode objects into strings."""

    def test_unicode_string_can_be_encoded(self):
        html = "<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        assert soup.b.string.encode("utf-8") == "\N{SNOWMAN}".encode("utf-8")

    def test_tag_containing_unicode_string_can_be_encoded(self):
        html = "<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        assert soup.b.encode("utf-8") == html.encode("utf-8")

    def test_encoding_substitutes_unrecognized_characters_by_default(self):
        html = "<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        assert soup.b.encode("ascii") == b"<b>&#9731;</b>"

    def test_encoding_can_be_made_strict(self):
        html = "<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        with pytest.raises(UnicodeEncodeError):
            soup.encode("ascii", errors="strict")

    def test_decode_contents(self):
        html = "<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        assert "\N{SNOWMAN}" == soup.b.decode_contents()

    def test_encode_contents(self):
        html = "<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        assert "\N{SNOWMAN}".encode("utf8") == soup.b.encode_contents(
            encoding="utf8"
        )

    def test_deprecated_renderContents(self):
        html = "<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        assert "\N{SNOWMAN}".encode("utf8") == soup.b.renderContents()

    def test_repr(self):
        html = "<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        assert html == repr(soup)


class TestFormatters(SoupTest):
    """Test the formatting feature, used by methods like decode() and
    prettify(), and the formatters themselves.
    """

    def test_default_formatter_is_minimal(self):
        markup = "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter="minimal")
        # The < is converted back into &lt; but the e-with-acute is left alone.
        assert decoded == self.document_for(
            "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
        )

    def test_formatter_html(self):
        markup = "<br><b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter="html")
        assert decoded == self.document_for(
            "<br/><b>&lt;&lt;Sacr&eacute; bleu!&gt;&gt;</b>"
        )

    def test_formatter_html5(self):
        markup = "<br><b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter="html5")
        assert decoded == self.document_for(
            "<br><b>&lt;&lt;Sacr&eacute; bleu!&gt;&gt;</b>"
        )

    def test_formatter_minimal(self):
        markup = "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter="minimal")
        # The < is converted back into &lt; but the e-with-acute is left alone.
        assert decoded == self.document_for(
            "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
        )

    def test_formatter_null(self):
        markup = "<b>&lt;&lt;Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</b>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter=None)
        # Neither the angle brackets nor the e-with-acute are converted.
        # This is not valid HTML, but it's what the user wanted.
        assert decoded == self.document_for(
            "<b><<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></b>"
        )

    def test_formatter_custom(self):
        markup = "<b>&lt;foo&gt;</b><b>bar</b><br/>"
        soup = self.soup(markup)
        decoded = soup.decode(formatter = lambda x: x.upper())
        # Instead of normal entity conversion code, the custom
        # callable is called on every string.
        assert decoded == self.document_for("<b><FOO></b><b>BAR</b><br/>")

    def test_formatter_is_run_on_attribute_values(self):
        markup = '<a href="http://a.com?a=b&c=é">e</a>'
        soup = self.soup(markup)
        a = soup.a

        expect_minimal = '<a href="http://a.com?a=b&amp;c=é">e</a>'

        assert expect_minimal == a.decode()
        assert expect_minimal == a.decode(formatter="minimal")

        expect_html = '<a href="http://a.com?a=b&amp;c=&eacute;">e</a>'
        assert expect_html == a.decode(formatter="html")

        assert markup == a.decode(formatter=None)
        expect_upper = '<a href="HTTP://A.COM?A=B&C=É">E</a>'
        assert expect_upper == a.decode(formatter=lambda x: x.upper())

    def test_formatter_skips_script_tag_for_html_documents(self):
        doc = """
<script type="text/javascript">
console.log("< < hey > > ");
</script>
"""
        encoded = BeautifulSoup(doc, 'html.parser').encode()
        assert b"< < hey > >" in encoded

    def test_formatter_skips_style_tag_for_html_documents(self):
        doc = """
<style type="text/css">
console.log("< < hey > > ");
</style>
"""
        encoded = BeautifulSoup(doc, 'html.parser').encode()
        assert b"< < hey > >" in encoded

    def test_prettify_leaves_preformatted_text_alone(self):
        soup = self.soup("<div> foo <pre> \tbar\n \n </pre> baz <textarea> eee\nfff\t</textarea></div>")
        # Everything outside the <pre> tag is reformatted, but everything
        # inside is left alone.
        assert '<div>\n foo\n <pre> \tbar\n \n </pre>\n baz\n <textarea> eee\nfff\t</textarea>\n</div>' == soup.div.prettify()

    def test_prettify_accepts_formatter_function(self):
        soup = BeautifulSoup("<html><body>foo</body></html>", 'html.parser')
        pretty = soup.prettify(formatter = lambda x: x.upper())
        assert "FOO" in pretty

    def test_prettify_outputs_unicode_by_default(self):
        soup = self.soup("<a></a>")
        assert str == type(soup.prettify())

    def test_prettify_can_encode_data(self):
        soup = self.soup("<a></a>")
        assert bytes == type(soup.prettify("utf-8"))

    def test_html_entity_substitution_off_by_default(self):
        markup = "<b>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</b>"
        soup = self.soup(markup)
        encoded = soup.b.encode("utf-8")
        assert encoded == markup.encode('utf-8')

    def test_encoding_substitution(self):
        # Here's the <meta> tag saying that a document is
        # encoded in Shift-JIS.
        meta_tag = ('<meta content="text/html; charset=x-sjis" '
                    'http-equiv="Content-type"/>')
        soup = self.soup(meta_tag)

        # Parse the document, and the charset appears unchanged.
        assert soup.meta['content'] == 'text/html; charset=x-sjis'

        # Encode the document into some encoding, and the encoding is
        # substituted into the meta tag.
        utf_8 = soup.encode("utf-8")
        assert b"charset=utf-8" in utf_8

        euc_jp = soup.encode("euc_jp")
        assert b"charset=euc_jp" in euc_jp

        shift_jis = soup.encode("shift-jis")
        assert b"charset=shift-jis" in shift_jis

        utf_16_u = soup.encode("utf-16").decode("utf-16")
        assert "charset=utf-16" in utf_16_u

    def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self):
        markup = ('<head><meta content="text/html; charset=x-sjis" '
                  'http-equiv="Content-type"/></head><pre>foo</pre>')

        # Beautiful Soup used to try to rewrite the meta tag even if the
        # meta tag got filtered out by the strainer. This test makes
        # sure that doesn't happen.
        strainer = SoupStrainer('pre')
        soup = self.soup(markup, parse_only=strainer)
        assert soup.contents[0].name == 'pre'

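The charset rewriting tested in test_encoding_substitution can be reproduced in a couple of lines. An illustrative sketch (not part of the commit):

# Illustrative sketch, not part of the commit: encoding a document
# rewrites its <meta> charset declaration to match the target encoding.
from bs4 import BeautifulSoup

meta = ('<meta content="text/html; charset=x-sjis" '
        'http-equiv="Content-type"/>')
soup = BeautifulSoup(meta, 'html.parser')
print(soup.encode('utf-8'))   # ...charset=utf-8...
print(soup.encode('euc_jp'))  # ...charset=euc_jp...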
class TestCSSSelectors(SoupTest):
|
||||
"""Test basic CSS selector functionality.
|
||||
|
||||
This functionality is implemented in soupsieve, which has a much
|
||||
more comprehensive test suite, so this is basically an extra check
|
||||
that soupsieve works as expected.
|
||||
"""
|
||||
|
||||
HTML = """
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN"
|
||||
"http://www.w3.org/TR/html4/strict.dtd">
|
||||
<html>
|
||||
<head>
|
||||
<title>The title</title>
|
||||
<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
|
||||
</head>
|
||||
<body>
|
||||
<custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag>
|
||||
<div id="main" class="fancy">
|
||||
<div id="inner">
|
||||
<h1 id="header1">An H1</h1>
|
||||
<p>Some text</p>
|
||||
<p class="onep" id="p1">Some more text</p>
|
||||
<h2 id="header2">An H2</h2>
|
||||
<p class="class1 class2 class3" id="pmulti">Another</p>
|
||||
<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
|
||||
<h2 id="header3">Another H2</h2>
|
||||
<a id="me" href="http://simonwillison.net/" rel="me">me</a>
|
||||
<span class="s1">
|
||||
<a href="#" id="s1a1">span1a1</a>
|
||||
<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
|
||||
<span class="span2">
|
||||
<a href="#" id="s2a1">span2a1</a>
|
||||
</span>
|
||||
<span class="span3"></span>
|
||||
<custom-dashed-tag class="dashed" id="dash2"/>
|
||||
<div data-tag="dashedvalue" id="data1"/>
|
||||
</span>
|
||||
</div>
|
||||
<x id="xid">
|
||||
<z id="zida"/>
|
||||
<z id="zidab"/>
|
||||
<z id="zidac"/>
|
||||
</x>
|
||||
<y id="yid">
|
||||
<z id="zidb"/>
|
||||
</y>
|
||||
<p lang="en" id="lang-en">English</p>
|
||||
<p lang="en-gb" id="lang-en-gb">English UK</p>
|
||||
<p lang="en-us" id="lang-en-us">English US</p>
|
||||
<p lang="fr" id="lang-fr">French</p>
|
||||
</div>
|
||||
|
||||
<div id="footer">
|
||||
</div>
|
||||
"""
|
||||
|
||||
def setup_method(self):
|
||||
self.soup = BeautifulSoup(self.HTML, 'html.parser')
|
||||
|
||||
def assert_selects(self, selector, expected_ids, **kwargs):
|
||||
el_ids = [el['id'] for el in self.soup.select(selector, **kwargs)]
|
||||
el_ids.sort()
|
||||
expected_ids.sort()
|
||||
assert expected_ids == el_ids, "Selector %s, expected [%s], got [%s]" % (
|
||||
selector, ', '.join(expected_ids), ', '.join(el_ids)
|
||||
)
|
||||
|
||||
assertSelect = assert_selects
|
||||
|
||||
def assert_select_multiple(self, *tests):
|
||||
for selector, expected_ids in tests:
|
||||
self.assert_selects(selector, expected_ids)
|
||||
|
||||
def test_one_tag_one(self):
|
||||
els = self.soup.select('title')
|
||||
assert len(els) == 1
|
||||
assert els[0].name == 'title'
|
||||
assert els[0].contents == ['The title']
|
||||
|
||||
def test_one_tag_many(self):
|
||||
els = self.soup.select('div')
|
||||
assert len(els) == 4
|
||||
for div in els:
|
||||
assert div.name == 'div'
|
||||
|
||||
el = self.soup.select_one('div')
|
||||
assert 'main' == el['id']
|
||||
|
||||
def test_select_one_returns_none_if_no_match(self):
|
||||
match = self.soup.select_one('nonexistenttag')
|
||||
assert None == match
|
||||
|
||||
|
||||
def test_tag_in_tag_one(self):
|
||||
els = self.soup.select('div div')
|
||||
self.assert_selects('div div', ['inner', 'data1'])
|
||||
|
||||
def test_tag_in_tag_many(self):
|
||||
for selector in ('html div', 'html body div', 'body div'):
|
||||
self.assert_selects(selector, ['data1', 'main', 'inner', 'footer'])
|
||||
|
||||
|
||||
def test_limit(self):
|
||||
self.assert_selects('html div', ['main'], limit=1)
|
||||
self.assert_selects('html body div', ['inner', 'main'], limit=2)
|
||||
self.assert_selects('body div', ['data1', 'main', 'inner', 'footer'],
|
||||
limit=10)
|
||||
|
||||
def test_tag_no_match(self):
|
||||
assert len(self.soup.select('del')) == 0
|
||||
|
||||
def test_invalid_tag(self):
|
||||
with pytest.raises(SelectorSyntaxError):
|
||||
self.soup.select('tag%t')
|
||||
|
||||
def test_select_dashed_tag_ids(self):
|
||||
self.assert_selects('custom-dashed-tag', ['dash1', 'dash2'])
|
||||
|
||||
def test_select_dashed_by_id(self):
|
||||
dashed = self.soup.select('custom-dashed-tag[id=\"dash2\"]')
|
||||
assert dashed[0].name == 'custom-dashed-tag'
|
||||
assert dashed[0]['id'] == 'dash2'
|
||||
|
||||
def test_dashed_tag_text(self):
|
||||
assert self.soup.select('body > custom-dashed-tag')[0].text == 'Hello there.'
|
||||
|
||||
def test_select_dashed_matches_find_all(self):
|
||||
assert self.soup.select('custom-dashed-tag') == self.soup.find_all('custom-dashed-tag')
|
||||
|
||||
def test_header_tags(self):
|
||||
self.assert_select_multiple(
|
||||
('h1', ['header1']),
|
||||
('h2', ['header2', 'header3']),
|
||||
)
|
||||
|
||||
def test_class_one(self):
|
||||
for selector in ('.onep', 'p.onep', 'html p.onep'):
|
||||
els = self.soup.select(selector)
|
||||
assert len(els) == 1
|
||||
assert els[0].name == 'p'
|
||||
assert els[0]['class'] == ['onep']
|
||||
|
||||
def test_class_mismatched_tag(self):
|
||||
els = self.soup.select('div.onep')
|
||||
assert len(els) == 0
|
||||
|
||||
def test_one_id(self):
|
||||
for selector in ('div#inner', '#inner', 'div div#inner'):
|
||||
self.assert_selects(selector, ['inner'])
|
||||
|
||||
def test_bad_id(self):
|
||||
els = self.soup.select('#doesnotexist')
|
||||
assert len(els) == 0
|
||||
|
||||
def test_items_in_id(self):
|
||||
els = self.soup.select('div#inner p')
|
||||
assert len(els) == 3
|
||||
for el in els:
|
||||
assert el.name == 'p'
|
||||
assert els[1]['class'] == ['onep']
|
||||
assert not els[0].has_attr('class')
|
||||
|
||||
def test_a_bunch_of_emptys(self):
|
||||
for selector in ('div#main del', 'div#main div.oops', 'div div#main'):
|
||||
assert len(self.soup.select(selector)) == 0
|
||||
|
||||
def test_multi_class_support(self):
|
||||
for selector in ('.class1', 'p.class1', '.class2', 'p.class2',
|
||||
'.class3', 'p.class3', 'html p.class2', 'div#inner .class2'):
|
||||
self.assert_selects(selector, ['pmulti'])
|
||||
|
||||
def test_multi_class_selection(self):
|
||||
for selector in ('.class1.class3', '.class3.class2',
|
||||
'.class1.class2.class3'):
|
||||
self.assert_selects(selector, ['pmulti'])
|
||||
|
||||
def test_child_selector(self):
|
||||
self.assert_selects('.s1 > a', ['s1a1', 's1a2'])
|
||||
self.assert_selects('.s1 > a span', ['s1a2s1'])
|
||||
|
||||
def test_child_selector_id(self):
|
||||
self.assert_selects('.s1 > a#s1a2 span', ['s1a2s1'])
|
||||
|
||||
def test_attribute_equals(self):
|
||||
self.assert_select_multiple(
|
||||
('p[class="onep"]', ['p1']),
|
||||
('p[id="p1"]', ['p1']),
|
||||
('[class="onep"]', ['p1']),
|
||||
('[id="p1"]', ['p1']),
|
||||
('link[rel="stylesheet"]', ['l1']),
|
||||
('link[type="text/css"]', ['l1']),
|
||||
('link[href="blah.css"]', ['l1']),
|
||||
('link[href="no-blah.css"]', []),
|
||||
('[rel="stylesheet"]', ['l1']),
|
||||
('[type="text/css"]', ['l1']),
|
||||
('[href="blah.css"]', ['l1']),
|
||||
('[href="no-blah.css"]', []),
|
||||
('p[href="no-blah.css"]', []),
|
||||
('[href="no-blah.css"]', []),
|
||||
)
|
||||
|
||||
def test_attribute_tilde(self):
|
||||
self.assert_select_multiple(
|
||||
('p[class~="class1"]', ['pmulti']),
|
||||
('p[class~="class2"]', ['pmulti']),
|
||||
('p[class~="class3"]', ['pmulti']),
|
||||
('[class~="class1"]', ['pmulti']),
|
||||
('[class~="class2"]', ['pmulti']),
|
||||
('[class~="class3"]', ['pmulti']),
|
||||
('a[rel~="friend"]', ['bob']),
|
||||
('a[rel~="met"]', ['bob']),
|
||||
('[rel~="friend"]', ['bob']),
|
||||
('[rel~="met"]', ['bob']),
|
||||
)
|
||||
|
||||
def test_attribute_startswith(self):
|
||||
self.assert_select_multiple(
|
||||
('[rel^="style"]', ['l1']),
|
||||
('link[rel^="style"]', ['l1']),
|
||||
('notlink[rel^="notstyle"]', []),
|
||||
('[rel^="notstyle"]', []),
|
||||
('link[rel^="notstyle"]', []),
|
||||
('link[href^="bla"]', ['l1']),
|
||||
('a[href^="http://"]', ['bob', 'me']),
|
||||
('[href^="http://"]', ['bob', 'me']),
|
||||
('[id^="p"]', ['pmulti', 'p1']),
|
||||
('[id^="m"]', ['me', 'main']),
|
||||
('div[id^="m"]', ['main']),
|
||||
('a[id^="m"]', ['me']),
|
||||
('div[data-tag^="dashed"]', ['data1'])
|
||||
)
|
||||
|
||||
def test_attribute_endswith(self):
|
||||
self.assert_select_multiple(
|
||||
('[href$=".css"]', ['l1']),
|
||||
('link[href$=".css"]', ['l1']),
|
||||
('link[id$="1"]', ['l1']),
|
||||
('[id$="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1', 'dash1']),
|
||||
('div[id$="1"]', ['data1']),
|
||||
('[id$="noending"]', []),
|
||||
)
|
||||
|
||||
def test_attribute_contains(self):
|
||||
self.assert_select_multiple(
|
||||
# From test_attribute_startswith
|
||||
('[rel*="style"]', ['l1']),
|
||||
('link[rel*="style"]', ['l1']),
|
||||
('notlink[rel*="notstyle"]', []),
|
||||
('[rel*="notstyle"]', []),
|
||||
('link[rel*="notstyle"]', []),
|
||||
('link[href*="bla"]', ['l1']),
|
||||
('[href*="http://"]', ['bob', 'me']),
|
||||
('[id*="p"]', ['pmulti', 'p1']),
|
||||
('div[id*="m"]', ['main']),
|
||||
('a[id*="m"]', ['me']),
|
||||
# From test_attribute_endswith
|
||||
('[href*=".css"]', ['l1']),
|
||||
('link[href*=".css"]', ['l1']),
|
||||
('link[id*="1"]', ['l1']),
|
||||
('[id*="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1', 'dash1']),
|
||||
('div[id*="1"]', ['data1']),
|
||||
('[id*="noending"]', []),
|
||||
# New for this test
|
||||
('[href*="."]', ['bob', 'me', 'l1']),
|
||||
('a[href*="."]', ['bob', 'me']),
|
||||
('link[href*="."]', ['l1']),
|
||||
('div[id*="n"]', ['main', 'inner']),
|
||||
('div[id*="nn"]', ['inner']),
|
||||
('div[data-tag*="edval"]', ['data1'])
|
||||
)
|
||||
|
||||
def test_attribute_exact_or_hypen(self):
|
||||
self.assert_select_multiple(
|
||||
('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
|
||||
('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
|
||||
('p[lang|="fr"]', ['lang-fr']),
|
||||
('p[lang|="gb"]', []),
|
||||
)
|
||||
|
||||
def test_attribute_exists(self):
|
||||
self.assert_select_multiple(
|
||||
('[rel]', ['l1', 'bob', 'me']),
|
||||
('link[rel]', ['l1']),
|
||||
('a[rel]', ['bob', 'me']),
|
||||
('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']),
|
||||
('p[class]', ['p1', 'pmulti']),
|
||||
('[blah]', []),
|
||||
('p[blah]', []),
|
||||
('div[data-tag]', ['data1'])
|
||||
)
|
||||
|
||||
def test_quoted_space_in_selector_name(self):
|
||||
html = """<div style="display: wrong">nope</div>
|
||||
<div style="display: right">yes</div>
|
||||
"""
|
||||
soup = BeautifulSoup(html, 'html.parser')
|
||||
[chosen] = soup.select('div[style="display: right"]')
|
||||
assert "yes" == chosen.string
|
||||
|
||||
def test_unsupported_pseudoclass(self):
|
||||
with pytest.raises(NotImplementedError):
|
||||
self.soup.select("a:no-such-pseudoclass")
|
||||
|
||||
with pytest.raises(SelectorSyntaxError):
|
||||
self.soup.select("a:nth-of-type(a)")
|
||||
|
||||
def test_nth_of_type(self):
|
||||
# Try to select first paragraph
|
||||
els = self.soup.select('div#inner p:nth-of-type(1)')
|
||||
assert len(els) == 1
|
||||
assert els[0].string == 'Some text'
|
||||
|
||||
# Try to select third paragraph
|
||||
els = self.soup.select('div#inner p:nth-of-type(3)')
|
||||
assert len(els) == 1
|
||||
assert els[0].string == 'Another'
|
||||
|
||||
# Try to select (non-existent!) fourth paragraph
|
||||
els = self.soup.select('div#inner p:nth-of-type(4)')
|
||||
assert len(els) == 0
|
||||
|
||||
# Zero will select no tags.
|
||||
els = self.soup.select('div p:nth-of-type(0)')
|
||||
assert len(els) == 0
|
||||
|
||||
def test_nth_of_type_direct_descendant(self):
|
||||
els = self.soup.select('div#inner > p:nth-of-type(1)')
|
||||
assert len(els) == 1
|
||||
assert els[0].string == 'Some text'
|
||||
|
||||
def test_id_child_selector_nth_of_type(self):
|
||||
self.assert_selects('#inner > p:nth-of-type(2)', ['p1'])
|
||||
|
||||
def test_select_on_element(self):
|
||||
        # Other tests operate on the tree; this operates on an element
        # within the tree.
        inner = self.soup.find("div", id="main")
        selected = inner.select("div")
        # The <div id="inner"> tag was selected. The <div id="footer">
        # tag was not.
        self.assert_selects_ids(selected, ['inner', 'data1'])

    def test_overspecified_child_id(self):
        self.assert_selects(".fancy #inner", ['inner'])
        self.assert_selects(".normal #inner", [])

    def test_adjacent_sibling_selector(self):
        self.assert_selects('#p1 + h2', ['header2'])
        self.assert_selects('#p1 + h2 + p', ['pmulti'])
        self.assert_selects('#p1 + #header2 + .class1', ['pmulti'])
        assert [] == self.soup.select('#p1 + p')

    def test_general_sibling_selector(self):
        self.assert_selects('#p1 ~ h2', ['header2', 'header3'])
        self.assert_selects('#p1 ~ #header2', ['header2'])
        self.assert_selects('#p1 ~ h2 + a', ['me'])
        self.assert_selects('#p1 ~ h2 + [rel="me"]', ['me'])
        assert [] == self.soup.select('#inner ~ h2')

    def test_dangling_combinator(self):
        with pytest.raises(SelectorSyntaxError):
            self.soup.select('h1 >')

    def test_sibling_combinator_wont_select_same_tag_twice(self):
        self.assert_selects('p[lang] ~ p', ['lang-en-gb', 'lang-en-us', 'lang-fr'])

    # Test the selector grouping operator (the comma)
    def test_multiple_select(self):
        self.assert_selects('x, y', ['xid', 'yid'])

    def test_multiple_select_with_no_space(self):
        self.assert_selects('x,y', ['xid', 'yid'])

    def test_multiple_select_with_more_space(self):
        self.assert_selects('x,    y', ['xid', 'yid'])

    def test_multiple_select_duplicated(self):
        self.assert_selects('x, x', ['xid'])

    def test_multiple_select_sibling(self):
        self.assert_selects('x, y ~ p[lang=fr]', ['xid', 'lang-fr'])

    def test_multiple_select_tag_and_direct_descendant(self):
        self.assert_selects('x, y > z', ['xid', 'zidb'])

    def test_multiple_select_direct_descendant_and_tags(self):
        self.assert_selects('div > x, y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])

    def test_multiple_select_indirect_descendant(self):
        self.assert_selects('div x,y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])

    def test_invalid_multiple_select(self):
        with pytest.raises(SelectorSyntaxError):
            self.soup.select(',x, y')
        with pytest.raises(SelectorSyntaxError):
            self.soup.select('x,,y')

    def test_multiple_select_attrs(self):
        self.assert_selects('p[lang=en], p[lang=en-gb]', ['lang-en', 'lang-en-gb'])

    def test_multiple_select_ids(self):
        self.assert_selects('x, y > z[id=zida], z[id=zidab], z[id=zidb]', ['xid', 'zidb', 'zidab'])

    def test_multiple_select_nested(self):
        self.assert_selects('body > div > x, y > z', ['xid', 'zidb'])

    def test_select_duplicate_elements(self):
        # When markup contains duplicate elements, a multiple select
        # will find all of them.
        markup = '<div class="c1"/><div class="c2"/><div class="c1"/>'
        soup = BeautifulSoup(markup, 'html.parser')
        selected = soup.select(".c1, .c2")
        assert 3 == len(selected)

        # Verify that find_all finds the same elements, though because
        # of an implementation detail it finds them in a different
        # order.
        for element in soup.find_all(class_=['c1', 'c2']):
            assert element in selected
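
As a standalone sketch of the grouping behavior these tests exercise (the
markup and ids below are invented for illustration, not taken from the suite):

    from bs4 import BeautifulSoup

    doc = BeautifulSoup('<p id="a">one</p><h2 id="b">two</h2>', 'html.parser')
    # The comma groups selectors; matches come back in document order,
    # without duplicates.
    assert [tag['id'] for tag in doc.select('p, h2')] == ['a', 'b']
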
class TestPersistence(SoupTest):
    "Testing features like pickle and deepcopy."

    def setup_method(self):
        self.page = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN"
"http://www.w3.org/TR/REC-html40/transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Beautiful Soup: We called him Tortoise because he taught us.</title>
<link rev="made" href="mailto:leonardr@segfault.org">
<meta name="Description" content="Beautiful Soup: an HTML parser optimized for screen-scraping.">
<meta name="generator" content="Markov Approximation 1.4 (module: leonardr)">
<meta name="author" content="Leonard Richardson">
</head>
<body>
<a href="foo">foo</a>
<a href="foo"><b>bar</b></a>
</body>
</html>"""
        self.tree = self.soup(self.page)

    def test_pickle_and_unpickle_identity(self):
        # Pickling a tree, then unpickling it, yields a tree identical
        # to the original.
        dumped = pickle.dumps(self.tree, 2)
        loaded = pickle.loads(dumped)
        assert loaded.__class__ == BeautifulSoup
        assert loaded.decode() == self.tree.decode()

    def test_deepcopy_identity(self):
        # Making a deepcopy of a tree yields an identical tree.
        copied = copy.deepcopy(self.tree)
        assert copied.decode() == self.tree.decode()

    def test_copy_preserves_encoding(self):
        soup = BeautifulSoup(b'<p>&nbsp;</p>', 'html.parser')
        encoding = soup.original_encoding
        copy = soup.__copy__()
        assert "<p>\xa0</p>" == str(copy)
        assert encoding == copy.original_encoding

    def test_copy_preserves_builder_information(self):
        tag = self.soup('<p></p>').p

        # Simulate a tag obtained from a source file.
        tag.sourceline = 10
        tag.sourcepos = 33

        copied = tag.__copy__()

        # The TreeBuilder object is no longer available, but information
        # obtained from it gets copied over to the new Tag object.
        assert tag.sourceline == copied.sourceline
        assert tag.sourcepos == copied.sourcepos
        assert tag.can_be_empty_element == copied.can_be_empty_element
        assert tag.cdata_list_attributes == copied.cdata_list_attributes
        assert tag.preserve_whitespace_tags == copied.preserve_whitespace_tags

    def test_unicode_pickle(self):
        # A tree containing Unicode characters can be pickled.
        html = "<b>\N{SNOWMAN}</b>"
        soup = self.soup(html)
        dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL)
        loaded = pickle.loads(dumped)
        assert loaded.decode() == soup.decode()

    def test_copy_navigablestring_is_not_attached_to_tree(self):
        html = "<b>Foo<a></a></b><b>Bar</b>"
        soup = self.soup(html)
        s1 = soup.find(string="Foo")
        s2 = copy.copy(s1)
        assert s1 == s2
        assert None == s2.parent
        assert None == s2.next_element
        assert None != s1.next_sibling
        assert None == s2.next_sibling
        assert None == s2.previous_element

    def test_copy_navigablestring_subclass_has_same_type(self):
        html = "<b><!--Foo--></b>"
        soup = self.soup(html)
        s1 = soup.string
        s2 = copy.copy(s1)
        assert s1 == s2
        assert isinstance(s2, Comment)

    def test_copy_entire_soup(self):
        html = "<div><b>Foo<a></a></b><b>Bar</b></div>end"
        soup = self.soup(html)
        soup_copy = copy.copy(soup)
        assert soup == soup_copy

    def test_copy_tag_copies_contents(self):
        html = "<div><b>Foo<a></a></b><b>Bar</b></div>end"
        soup = self.soup(html)
        div = soup.div
        div_copy = copy.copy(div)

        # The two tags look the same, and evaluate to equal.
        assert str(div) == str(div_copy)
        assert div == div_copy

        # But they're not the same object.
        assert div is not div_copy

        # And they don't have the same relation to the parse tree. The
        # copy is not associated with a parse tree at all.
        assert None == div_copy.parent
        assert None == div_copy.previous_element
        assert None == div_copy.find(string='Bar').next_element
        assert None != div.find(string='Bar').next_element
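
A minimal sketch of the round trip these persistence tests rely on, using only
the public pickle and copy APIs shown above:

    import copy
    import pickle

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<b>\N{SNOWMAN}</b>", "html.parser")
    clone = copy.deepcopy(soup)                  # detached but identical tree
    restored = pickle.loads(pickle.dumps(soup))  # survives a pickle round trip
    assert clone.decode() == restored.decode() == soup.decode()
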
lib/bs4/tests/test_soup.py (new file, 462 lines)
@@ -0,0 +1,462 @@
# -*- coding: utf-8 -*-
"""Tests of Beautiful Soup as a whole."""

from pdb import set_trace
import logging
import os
import pickle
import pytest
import sys
import tempfile

from bs4 import (
    BeautifulSoup,
    BeautifulStoneSoup,
    GuessedAtParserWarning,
    MarkupResemblesLocatorWarning,
    dammit,
)
from bs4.builder import (
    builder_registry,
    TreeBuilder,
    ParserRejectedMarkup,
)
from bs4.element import (
    Comment,
    SoupStrainer,
    Tag,
    NavigableString,
)

from . import (
    default_builder,
    SoupTest,
    skipIf,
)
import warnings

try:
    from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
    LXML_PRESENT = True
except ImportError as e:
    LXML_PRESENT = False

PYTHON_3_PRE_3_2 = (sys.version_info[0] == 3 and sys.version_info < (3, 2))


class TestConstructor(SoupTest):

    def test_short_unicode_input(self):
        data = "<h1>éé</h1>"
        soup = self.soup(data)
        assert "éé" == soup.h1.string

    def test_embedded_null(self):
        data = "<h1>foo\0bar</h1>"
        soup = self.soup(data)
        assert "foo\0bar" == soup.h1.string

    def test_exclude_encodings(self):
        utf8_data = "Räksmörgås".encode("utf-8")
        soup = self.soup(utf8_data, exclude_encodings=["utf-8"])
        assert "windows-1252" == soup.original_encoding

    def test_custom_builder_class(self):
        # Verify that you can pass in a custom Builder class and
        # it'll be instantiated with the appropriate keyword arguments.
        class Mock(object):
            def __init__(self, **kwargs):
                self.called_with = kwargs
                self.is_xml = True
                self.store_line_numbers = False
                self.cdata_list_attributes = []
                self.preserve_whitespace_tags = []
                self.string_containers = {}
            def initialize_soup(self, soup):
                pass
            def feed(self, markup):
                self.fed = markup
            def reset(self):
                pass
            def ignore(self, ignore):
                pass
            set_up_substitutions = can_be_empty_element = ignore
            def prepare_markup(self, *args, **kwargs):
                yield "prepared markup", "original encoding", "declared encoding", "contains replacement characters"

        kwargs = dict(
            var="value",
            # This is a deprecated BS3-era keyword argument, which
            # will be stripped out.
            convertEntities=True,
        )
        with warnings.catch_warnings(record=True):
            soup = BeautifulSoup('', builder=Mock, **kwargs)
        assert isinstance(soup.builder, Mock)
        assert dict(var="value") == soup.builder.called_with
        assert "prepared markup" == soup.builder.fed

        # You can also instantiate the TreeBuilder yourself. In this
        # case, that specific object is used and any keyword arguments
        # to the BeautifulSoup constructor are ignored.
        builder = Mock(**kwargs)
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulSoup(
                '', builder=builder, ignored_value=True,
            )
        msg = str(w[0].message)
        assert msg.startswith("Keyword arguments to the BeautifulSoup constructor will be ignored.")
        assert builder == soup.builder
        assert kwargs == builder.called_with

    def test_parser_markup_rejection(self):
        # If markup is completely rejected by the parser, an
        # explanatory ParserRejectedMarkup exception is raised.
        class Mock(TreeBuilder):
            def feed(self, *args, **kwargs):
                raise ParserRejectedMarkup("Nope.")

            def prepare_markup(self, *args, **kwargs):
                # We're going to try two different ways of preparing this markup,
                # but feed() will reject both of them.
                yield markup, None, None, False
                yield markup, None, None, False

        import re
        with pytest.raises(ParserRejectedMarkup) as exc_info:
            BeautifulSoup('', builder=Mock)
        assert "The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help." in str(exc_info.value)

    def test_cdata_list_attributes(self):
        # Most attribute values are represented as scalars, but the
        # HTML standard says that some attributes, like 'class', have
        # space-separated lists as values.
        markup = '<a id=" an id " class=" a class "></a>'
        soup = self.soup(markup)

        # Note that the spaces are stripped for 'class' but not for 'id'.
        a = soup.a
        assert " an id " == a['id']
        assert ["a", "class"] == a['class']

        # TreeBuilder takes an argument called 'multi_valued_attributes' which lets
        # you customize or disable this. As always, you can customize the TreeBuilder
        # by passing in a keyword argument to the BeautifulSoup constructor.
        soup = self.soup(markup, builder=default_builder, multi_valued_attributes=None)
        assert " a class " == soup.a['class']

        # Here are two ways of saying that `id` is a multi-valued
        # attribute in this context, but 'class' is not.
        for switcheroo in ({'*': 'id'}, {'a': 'id'}):
            with warnings.catch_warnings(record=True) as w:
                # This will create a warning about not explicitly
                # specifying a parser, but we'll ignore it.
                soup = self.soup(markup, builder=None, multi_valued_attributes=switcheroo)
            a = soup.a
            assert ["an", "id"] == a['id']
            assert " a class " == a['class']
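
The behavior under test, sketched outside the suite (markup invented for
illustration):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<a class="foo bar" id="x y"></a>', 'html.parser')
    assert soup.a['class'] == ['foo', 'bar']  # 'class' is multi-valued by default
    assert soup.a['id'] == 'x y'              # 'id' stays a scalar

    # Passing multi_valued_attributes=None disables the list treatment:
    flat = BeautifulSoup('<a class="foo bar"></a>', 'html.parser',
                         multi_valued_attributes=None)
    assert flat.a['class'] == 'foo bar'
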
    def test_replacement_classes(self):
        # Test the ability to pass in replacements for element classes
        # which will be used when building the tree.
        class TagPlus(Tag):
            pass

        class StringPlus(NavigableString):
            pass

        class CommentPlus(Comment):
            pass

        soup = self.soup(
            "<a><b>foo</b>bar</a><!--whee-->",
            element_classes = {
                Tag: TagPlus,
                NavigableString: StringPlus,
                Comment: CommentPlus,
            }
        )

        # The tree was built with TagPlus, StringPlus, and CommentPlus objects,
        # rather than Tag, String, and Comment objects.
        assert all(
            isinstance(x, (TagPlus, StringPlus, CommentPlus))
            for x in soup.recursiveChildGenerator()
        )
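
A compact sketch of the element_classes hook exercised above (MyTag is a
hypothetical name, not part of the library):

    from bs4 import BeautifulSoup
    from bs4.element import Tag

    class MyTag(Tag):
        pass  # hypothetical subclass, for illustration only

    soup = BeautifulSoup('<p>hi</p>', 'html.parser',
                         element_classes={Tag: MyTag})
    assert isinstance(soup.p, MyTag)  # every tag was built as MyTag
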
    def test_alternate_string_containers(self):
        # Test the ability to customize the string containers for
        # different types of tags.
        class PString(NavigableString):
            pass

        class BString(NavigableString):
            pass

        soup = self.soup(
            "<div>Hello.<p>Here is <b>some <i>bolded</i></b> text",
            string_containers = {
                'b': BString,
                'p': PString,
            }
        )

        # The string before the <p> tag is a regular NavigableString.
        assert isinstance(soup.div.contents[0], NavigableString)

        # The string inside the <p> tag, but not inside the <i> tag,
        # is a PString.
        assert isinstance(soup.p.contents[0], PString)

        # Every string inside the <b> tag is a BString, even the one that
        # was also inside an <i> tag.
        for s in soup.b.strings:
            assert isinstance(s, BString)

        # Now that parsing is complete, the string_container_stack
        # (where this information was kept) has been cleared out.
        assert [] == soup.string_container_stack


class TestWarnings(SoupTest):

    def _assert_warning(self, warnings, cls):
        for w in warnings:
            if isinstance(w.message, cls):
                return w
        raise Exception("%s warning not found in %r" % (cls, warnings))

    def _assert_no_parser_specified(self, w):
        warning = self._assert_warning(w, GuessedAtParserWarning)
        message = str(warning.message)
        assert message.startswith(BeautifulSoup.NO_PARSER_SPECIFIED_WARNING[:60])

    def test_warning_if_no_parser_specified(self):
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulSoup("<a><b></b></a>")
        self._assert_no_parser_specified(w)

    def test_warning_if_parser_specified_too_vague(self):
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulSoup("<a><b></b></a>", "html")
        self._assert_no_parser_specified(w)

    def test_no_warning_if_explicit_parser_specified(self):
        with warnings.catch_warnings(record=True) as w:
            soup = BeautifulSoup("<a><b></b></a>", "html.parser")
        assert [] == w
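
The same warning behavior, sketched as a standalone snippet:

    import warnings

    from bs4 import BeautifulSoup, GuessedAtParserWarning

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        BeautifulSoup("<a></a>")  # no parser named, so bs4 guesses and warns
    assert any(isinstance(w.message, GuessedAtParserWarning) for w in caught)

    BeautifulSoup("<a></a>", "html.parser")  # explicit parser, no warning
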
    def test_parseOnlyThese_renamed_to_parse_only(self):
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("<a><b></b></a>", parseOnlyThese=SoupStrainer("b"))
        msg = str(w[0].message)
        assert "parseOnlyThese" in msg
        assert "parse_only" in msg
        assert b"<b></b>" == soup.encode()

    def test_fromEncoding_renamed_to_from_encoding(self):
        with warnings.catch_warnings(record=True) as w:
            utf8 = b"\xc3\xa9"
            soup = self.soup(utf8, fromEncoding="utf8")
        msg = str(w[0].message)
        assert "fromEncoding" in msg
        assert "from_encoding" in msg
        assert "utf8" == soup.original_encoding

    def test_unrecognized_keyword_argument(self):
        with pytest.raises(TypeError):
            self.soup("<a>", no_such_argument=True)

    @pytest.mark.parametrize(
        "extension",
        ['markup.html', 'markup.htm', 'markup.HTML', 'markup.txt',
         'markup.xhtml', 'markup.xml', "/home/user/file", "c:\\user\file"]
    )
    def test_resembles_filename_warning(self, extension):
        # A warning is issued if the "markup" looks like the name of
        # an HTML or text file, or a full path to a file on disk.
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("markup" + extension)
            warning = self._assert_warning(w, MarkupResemblesLocatorWarning)
            assert "looks more like a filename" in str(warning.message)

    @pytest.mark.parametrize(
        "extension",
        ['markuphtml', 'markup.com', '', 'markup.js']
    )
    def test_resembles_filename_no_warning(self, extension):
        # The 'looks more like a filename' warning is not issued if
        # the markup looks like a bare string, a domain name, or a
        # file that's not an HTML file.
        with warnings.catch_warnings(record=True) as w:
            soup = self.soup("markup" + extension)
        assert [] == w

    def test_url_warning_with_bytes_url(self):
        url = b"http://www.crummybytes.com/"
        with warnings.catch_warnings(record=True) as warning_list:
            soup = self.soup(url)
        warning = self._assert_warning(
            warning_list, MarkupResemblesLocatorWarning
        )
        assert "looks more like a URL" in str(warning.message)
        assert url not in str(warning.message).encode("utf8")

    def test_url_warning_with_unicode_url(self):
        url = "http://www.crummyunicode.com/"
        with warnings.catch_warnings(record=True) as warning_list:
            # note - this url must differ from the bytes one otherwise
            # python's warnings system swallows the second warning
            soup = self.soup(url)
        warning = self._assert_warning(
            warning_list, MarkupResemblesLocatorWarning
        )
        assert "looks more like a URL" in str(warning.message)
        assert url not in str(warning.message)

    def test_url_warning_with_bytes_and_space(self):
        # Here the markup contains something besides a URL, so no warning
        # is issued.
        with warnings.catch_warnings(record=True) as warning_list:
            soup = self.soup(b"http://www.crummybytes.com/ is great")
        assert not any("looks more like a URL" in str(w.message)
                       for w in warning_list)

    def test_url_warning_with_unicode_and_space(self):
        with warnings.catch_warnings(record=True) as warning_list:
            soup = self.soup("http://www.crummyunicode.com/ is great")
        assert not any("looks more like a URL" in str(w.message)
                       for w in warning_list)
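
A standalone sketch of the filename heuristic tested above:

    import warnings

    from bs4 import BeautifulSoup, MarkupResemblesLocatorWarning

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        BeautifulSoup("markup.html", "html.parser")  # looks like a filename
    assert any(isinstance(w.message, MarkupResemblesLocatorWarning)
               for w in caught)
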
class TestSelectiveParsing(SoupTest):

    def test_parse_with_soupstrainer(self):
        markup = "No<b>Yes</b><a>No<b>Yes <c>Yes</c></b>"
        strainer = SoupStrainer("b")
        soup = self.soup(markup, parse_only=strainer)
        assert soup.encode() == b"<b>Yes</b><b>Yes <c>Yes</c></b>"
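
The SoupStrainer pattern in isolation (markup invented for illustration):

    from bs4 import BeautifulSoup, SoupStrainer

    only_links = SoupStrainer("a")
    soup = BeautifulSoup('<p>Hi <a href="/x">x</a></p>', 'html.parser',
                         parse_only=only_links)
    assert soup.decode() == '<a href="/x">x</a>'  # only <a> tags were kept
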
class TestNewTag(SoupTest):
    """Test the BeautifulSoup.new_tag() method."""
    def test_new_tag(self):
        soup = self.soup("")
        new_tag = soup.new_tag("foo", bar="baz", attrs={"name": "a name"})
        assert isinstance(new_tag, Tag)
        assert "foo" == new_tag.name
        assert dict(bar="baz", name="a name") == new_tag.attrs
        assert None == new_tag.parent

    def test_tag_inherits_self_closing_rules_from_builder(self):
        if LXML_PRESENT:
            xml_soup = BeautifulSoup("", "lxml-xml")
            xml_br = xml_soup.new_tag("br")
            xml_p = xml_soup.new_tag("p")

            # Both the <br> and <p> tag are empty-element, just because
            # they have no contents.
            assert b"<br/>" == xml_br.encode()
            assert b"<p/>" == xml_p.encode()

        html_soup = BeautifulSoup("", "html.parser")
        html_br = html_soup.new_tag("br")
        html_p = html_soup.new_tag("p")

        # The HTML builder uses HTML's rules about which tags are
        # empty-element tags, and the new tags reflect these rules.
        assert b"<br/>" == html_br.encode()
        assert b"<p></p>" == html_p.encode()
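
new_tag() in isolation (the URL is a placeholder):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<body></body>", "html.parser")
    link = soup.new_tag("a", href="http://example.com/")  # placeholder href
    link.string = "link text"
    soup.body.append(link)
    assert soup.body.decode() == (
        '<body><a href="http://example.com/">link text</a></body>'
    )
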
class TestNewString(SoupTest):
    """Test the BeautifulSoup.new_string() method."""
    def test_new_string_creates_navigablestring(self):
        soup = self.soup("")
        s = soup.new_string("foo")
        assert "foo" == s
        assert isinstance(s, NavigableString)

    def test_new_string_can_create_navigablestring_subclass(self):
        soup = self.soup("")
        s = soup.new_string("foo", Comment)
        assert "foo" == s
        assert isinstance(s, Comment)


class TestPickle(SoupTest):
    # Test our ability to pickle the BeautifulSoup object itself.

    def test_normal_pickle(self):
        soup = self.soup("<a>some markup</a>")
        pickled = pickle.dumps(soup)
        unpickled = pickle.loads(pickled)
        assert "some markup" == unpickled.a.string

    def test_pickle_with_no_builder(self):
        # We had a bug that prevented pickling from working if
        # the builder wasn't set.
        soup = self.soup("some markup")
        soup.builder = None
        pickled = pickle.dumps(soup)
        unpickled = pickle.loads(pickled)
        assert "some markup" == unpickled.string


class TestEncodingConversion(SoupTest):
    # Test Beautiful Soup's ability to decode and encode from various
    # encodings.

    def setup_method(self):
        self.unicode_data = '<html><head><meta charset="utf-8"/></head><body><foo>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</foo></body></html>'
        self.utf8_data = self.unicode_data.encode("utf-8")
        # Just so you know what it looks like.
        assert self.utf8_data == b'<html><head><meta charset="utf-8"/></head><body><foo>Sacr\xc3\xa9 bleu!</foo></body></html>'

    def test_ascii_in_unicode_out(self):
        # ASCII input is converted to Unicode. The original_encoding
        # attribute is set to 'utf-8', a superset of ASCII.
        chardet = dammit.chardet_dammit
        logging.disable(logging.WARNING)
        try:
            def noop(str):
                return None
            # Disable chardet, which will realize that the ASCII is ASCII.
            dammit.chardet_dammit = noop
            ascii = b"<foo>a</foo>"
            soup_from_ascii = self.soup(ascii)
            unicode_output = soup_from_ascii.decode()
            assert isinstance(unicode_output, str)
            assert unicode_output == self.document_for(ascii.decode())
            assert soup_from_ascii.original_encoding.lower() == "utf-8"
        finally:
            logging.disable(logging.NOTSET)
            dammit.chardet_dammit = chardet

    def test_unicode_in_unicode_out(self):
        # Unicode input is left alone. The original_encoding attribute
        # is not set.
        soup_from_unicode = self.soup(self.unicode_data)
        assert soup_from_unicode.decode() == self.unicode_data
        assert soup_from_unicode.foo.string == 'Sacr\xe9 bleu!'
        assert soup_from_unicode.original_encoding == None

    def test_utf8_in_unicode_out(self):
        # UTF-8 input is converted to Unicode. The original_encoding
        # attribute is set.
        soup_from_utf8 = self.soup(self.utf8_data)
        assert soup_from_utf8.decode() == self.unicode_data
        assert soup_from_utf8.foo.string == 'Sacr\xe9 bleu!'

    def test_utf8_out(self):
        # The internal data structures can be encoded as UTF-8.
        soup_from_unicode = self.soup(self.unicode_data)
        assert soup_from_unicode.encode('utf-8') == self.utf8_data

    @skipIf(
        PYTHON_3_PRE_3_2,
        "Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.")
    def test_attribute_name_containing_unicode_characters(self):
        markup = '<div><a \N{SNOWMAN}="snowman"></a></div>'
        assert self.soup(markup).div.encode("utf8") == markup.encode("utf8")
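
The decode/encode round trip under test, as a standalone sketch:

    from bs4 import BeautifulSoup

    data = ('<html><head><meta charset="utf-8"/></head>'
            '<body><foo>Sacr\xe9 bleu!</foo></body></html>')
    soup = BeautifulSoup(data.encode("utf-8"), "html.parser")
    assert soup.original_encoding.lower() == "utf-8"  # sniffed from the bytes
    assert soup.foo.string == "Sacr\xe9 bleu!"        # Unicode internally
    round_tripped = soup.encode("utf-8")              # back to UTF-8 bytes
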
lib/bs4/tests/test_tag.py (new file, 221 lines)
@@ -0,0 +1,221 @@
import warnings
from bs4.element import (
    Comment,
    NavigableString,
)
from . import SoupTest

class TestTag(SoupTest):
    """Test various methods of Tag which aren't so complicated they
    need their own classes.
    """

    def test__should_pretty_print(self):
        # Test the rules about when a tag should be pretty-printed.
        tag = self.soup("").new_tag("a_tag")

        # No list of whitespace-preserving tags -> pretty-print
        tag._preserve_whitespace_tags = None
        assert True == tag._should_pretty_print(0)

        # List exists but tag is not on the list -> pretty-print
        tag.preserve_whitespace_tags = ["some_other_tag"]
        assert True == tag._should_pretty_print(1)

        # Indent level is None -> don't pretty-print
        assert False == tag._should_pretty_print(None)

        # Tag is on the whitespace-preserving list -> don't pretty-print
        tag.preserve_whitespace_tags = ["some_other_tag", "a_tag"]
        assert False == tag._should_pretty_print(1)

    def test_len(self):
        """The length of a Tag is its number of children."""
        soup = self.soup("<top>1<b>2</b>3</top>")

        # The BeautifulSoup object itself contains one element: the
        # <top> tag.
        assert len(soup.contents) == 1
        assert len(soup) == 1

        # The <top> tag contains three elements: the text node "1", the
        # <b> tag, and the text node "3".
        assert len(soup.top) == 3
        assert len(soup.top.contents) == 3

    def test_member_access_invokes_find(self):
        """Accessing a Python member .foo invokes find('foo')"""
        soup = self.soup('<b><i></i></b>')
        assert soup.b == soup.find('b')
        assert soup.b.i == soup.find('b').find('i')
        assert soup.a == None

    def test_deprecated_member_access(self):
        soup = self.soup('<b><i></i></b>')
        with warnings.catch_warnings(record=True) as w:
            tag = soup.bTag
        assert soup.b == tag
        assert '.bTag is deprecated, use .find("b") instead. If you really were looking for a tag called bTag, use .find("bTag")' == str(w[0].message)

    def test_has_attr(self):
        """has_attr() checks for the presence of an attribute.

        Please note: has_attr() is different from
        __in__. has_attr() checks the tag's attributes and __in__
        checks the tag's children.
        """
        soup = self.soup("<foo attr='bar'>")
        assert soup.foo.has_attr('attr')
        assert not soup.foo.has_attr('attr2')

    def test_attributes_come_out_in_alphabetical_order(self):
        markup = '<b a="1" z="5" m="3" f="2" y="4"></b>'
        self.assertSoupEquals(markup, '<b a="1" f="2" m="3" y="4" z="5"></b>')

    def test_string(self):
        # A Tag that contains only a text node makes that node
        # available as .string.
        soup = self.soup("<b>foo</b>")
        assert soup.b.string == 'foo'

    def test_empty_tag_has_no_string(self):
        # A Tag with no children has no .string.
        soup = self.soup("<b></b>")
        assert soup.b.string == None

    def test_tag_with_multiple_children_has_no_string(self):
        # A Tag with multiple children has no .string.
        soup = self.soup("<a>foo<b></b><b></b></b>")
        assert soup.b.string == None

        soup = self.soup("<a>foo<b></b>bar</b>")
        assert soup.b.string == None

        # Even if all the children are strings, due to trickery,
        # it won't work--but this would be a good optimization.
        soup = self.soup("<a>foo</b>")
        soup.a.insert(1, "bar")
        assert soup.a.string == None

    def test_tag_with_recursive_string_has_string(self):
        # A Tag with a single child which has a .string inherits that
        # .string.
        soup = self.soup("<a><b>foo</b></a>")
        assert soup.a.string == "foo"
        assert soup.string == "foo"

    def test_lack_of_string(self):
        """Only a Tag containing a single text node has a .string."""
        soup = self.soup("<b>f<i>e</i>o</b>")
        assert soup.b.string is None

        soup = self.soup("<b></b>")
        assert soup.b.string is None

    def test_all_text(self):
        """Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated"""
        soup = self.soup("<a>a<b>r</b> <r> t </r></a>")
        assert soup.a.text == "ar t "
        assert soup.a.get_text(strip=True) == "art"
        assert soup.a.get_text(",") == "a,r, , t "
        assert soup.a.get_text(",", strip=True) == "a,r,t"

    def test_get_text_ignores_special_string_containers(self):
        soup = self.soup("foo<!--IGNORE-->bar")
        assert soup.get_text() == "foobar"

        assert soup.get_text(types=(NavigableString, Comment)) == "fooIGNOREbar"
        assert soup.get_text(types=None) == "fooIGNOREbar"

        soup = self.soup("foo<style>CSS</style><script>Javascript</script>bar")
        assert soup.get_text() == "foobar"

    def test_all_strings_ignores_special_string_containers(self):
        soup = self.soup("foo<!--IGNORE-->bar")
        assert ['foo', 'bar'] == list(soup.strings)

        soup = self.soup("foo<style>CSS</style><script>Javascript</script>bar")
        assert ['foo', 'bar'] == list(soup.strings)

    def test_string_methods_inside_special_string_container_tags(self):
        # Strings inside tags like <script> are generally ignored by
        # methods like get_text, because they're not what humans
        # consider 'text'. But if you call get_text on the <script>
        # tag itself, those strings _are_ considered to be 'text',
        # because there's nothing else you might be looking for.

        style = self.soup("<div>a<style>Some CSS</style></div>")
        template = self.soup("<div>a<template><p>Templated <b>text</b>.</p><!--With a comment.--></template></div>")
        script = self.soup("<div>a<script><!--a comment-->Some text</script></div>")

        assert style.div.get_text() == "a"
        assert list(style.div.strings) == ["a"]
        assert style.div.style.get_text() == "Some CSS"
        assert list(style.div.style.strings) == ['Some CSS']

        # The comment is not picked up here. That's because it was
        # parsed into a Comment object, which is not considered
        # interesting by template.strings.
        assert template.div.get_text() == "a"
        assert list(template.div.strings) == ["a"]
        assert template.div.template.get_text() == "Templated text."
        assert list(template.div.template.strings) == ["Templated ", "text", "."]

        # The comment is included here, because it didn't get parsed
        # into a Comment object--it's part of the Script string.
        assert script.div.get_text() == "a"
        assert list(script.div.strings) == ["a"]
        assert script.div.script.get_text() == "<!--a comment-->Some text"
        assert list(script.div.script.strings) == ['<!--a comment-->Some text']
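
get_text() and its types parameter, in isolation:

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<p>one<!--hidden-->two</p>", "html.parser")
    assert soup.get_text() == "onetwo"                  # comments are skipped
    assert soup.get_text("|") == "one|two"              # separator between strings
    assert soup.get_text(types=None) == "onehiddentwo"  # include every string type
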
class TestMultiValuedAttributes(SoupTest):
    """Test the behavior of multi-valued attributes like 'class'.

    The values of such attributes are always presented as lists.
    """

    def test_single_value_becomes_list(self):
        soup = self.soup("<a class='foo'>")
        assert ["foo"] == soup.a['class']

    def test_multiple_values_becomes_list(self):
        soup = self.soup("<a class='foo bar'>")
        assert ["foo", "bar"] == soup.a['class']

    def test_multiple_values_separated_by_weird_whitespace(self):
        soup = self.soup("<a class='foo\tbar\nbaz'>")
        assert ["foo", "bar", "baz"] == soup.a['class']

    def test_attributes_joined_into_string_on_output(self):
        soup = self.soup("<a class='foo\tbar'>")
        assert b'<a class="foo bar"></a>' == soup.a.encode()

    def test_get_attribute_list(self):
        soup = self.soup("<a id='abc def'>")
        assert ['abc def'] == soup.a.get_attribute_list('id')

    def test_accept_charset(self):
        soup = self.soup('<form accept-charset="ISO-8859-1 UTF-8">')
        assert ['ISO-8859-1', 'UTF-8'] == soup.form['accept-charset']

    def test_cdata_attribute_applying_only_to_one_tag(self):
        data = '<a accept-charset="ISO-8859-1 UTF-8"></a>'
        soup = self.soup(data)
        # We saw in another test that accept-charset is a cdata-list
        # attribute for the <form> tag. But it's not a cdata-list
        # attribute for any other tag.
        assert 'ISO-8859-1 UTF-8' == soup.a['accept-charset']

    def test_customization(self):
        # It's possible to change which attributes of which tags
        # are treated as multi-valued attributes.
        #
        # Here, 'id' is a multi-valued attribute and 'class' is not.
        #
        # TODO: This code is in the builder and should be tested there.
        soup = self.soup(
            '<a class="foo" id="bar">', multi_valued_attributes={ '*' : 'id' }
        )
        assert soup.a['class'] == 'foo'
        assert soup.a['id'] == ['bar']

lib/bs4/tests/test_tree.py (new file, 1290 lines)
(Diff suppressed because it is too large.)

@@ -192,5 +192,5 @@ def parse_version(ver: str) -> Version:
     return Version(major, minor, micro, release, pre, post, dev)


-__version_info__ = Version(2, 3, 1, "final")
+__version_info__ = Version(2, 3, 2, "final", post=1)
 __version__ = __version_info__._get_canonical()
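
The canonical string for the bumped version, assuming soupsieve's Version
namedtuple and its private _get_canonical() helper behave as this hunk implies:

    from soupsieve.__meta__ import Version

    # A "final" release with a post segment renders as X.Y.Z.postN, which is
    # why requirements.txt below pins soupsieve==2.3.2.post1.
    assert Version(2, 3, 2, "final", post=1)._get_canonical() == "2.3.2.post1"
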
@@ -84,7 +84,7 @@ class _DocumentNav:

         # Fail on unexpected types.
         if not cls.is_tag(tag):
-            raise TypeError("Expected a BeautifulSoup 'Tag', but instead recieved type {}".format(type(tag)))
+            raise TypeError("Expected a BeautifulSoup 'Tag', but instead received type {}".format(type(tag)))

     @staticmethod
     def is_doc(obj: 'bs4.Tag') -> bool:

@@ -562,7 +562,7 @@ class CSSParser:
         selector = self.custom.get(pseudo)
         if selector is None:
             raise SelectorSyntaxError(
-                "Undefined custom selector '{}' found at postion {}".format(pseudo, m.end(0)),
+                "Undefined custom selector '{}' found at position {}".format(pseudo, m.end(0)),
                 self.pattern,
                 m.end(0)
             )

@@ -828,7 +828,7 @@ class CSSParser:
         if not has_selector:
             if not is_forgive or combinator != COMMA_COMBINATOR:
                 raise SelectorSyntaxError(
-                    "The combinator '{}' at postion {}, must have a selector before it".format(combinator, index),
+                    "The combinator '{}' at position {}, must have a selector before it".format(combinator, index),
                     self.pattern,
                     index
                 )

@@ -1007,7 +1007,7 @@ class CSSParser:
         if not has_selector:
             if not is_forgive:
                 raise SelectorSyntaxError(
-                    "Expected a selector at postion {}".format(m.start(0)),
+                    "Expected a selector at position {}".format(m.start(0)),
                     self.pattern,
                     m.start(0)
                 )

@@ -1017,7 +1017,7 @@ class CSSParser:
                 break
         else:
             raise SelectorSyntaxError(
-                "Unmatched pseudo-class close at postion {}".format(m.start(0)),
+                "Unmatched pseudo-class close at position {}".format(m.start(0)),
                 self.pattern,
                 m.start(0)
             )
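
What the corrected messages look like from the caller's side, assuming
soupsieve exports SelectorSyntaxError at the top level (as the bs4 tests above
rely on):

    import soupsieve as sv

    try:
        sv.compile('h1 >')  # dangling combinator, as in test_dangling_combinator
    except sv.SelectorSyntaxError as e:
        print(e)  # the message points at the offending position in the pattern
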
@@ -4,7 +4,7 @@ arrow==1.2.2
 backports.csv==1.0.7
 backports.functools-lru-cache==1.6.4
 backports.zoneinfo==0.2.1
-beautifulsoup4==4.10.0
+beautifulsoup4==4.11.1
 bleach==5.0.0
 certifi==2021.10.8
 cheroot==8.6.0

@@ -40,7 +40,7 @@ requests-oauthlib==1.3.1
 rumps==0.3.0; platform_system == "Darwin"
 simplejson==3.17.6
 six==1.16.0
-soupsieve==2.3.1
+soupsieve==2.3.2.post1
 tempora==5.0.1
 tokenize-rt==4.2.1
 tzdata==2022.1