mirror of https://github.com/clinton-hall/nzbToMedia.git (synced 2025-07-15 01:32:53 -07:00)

updated bs4 lib. Fixes #1128

This commit is contained in:
  parent c94731bcbb
  commit c14618ebdd

15 changed files with 1077 additions and 237 deletions

bs4/__init__.py
@@ -5,26 +5,31 @@ http://www.crummy.com/software/BeautifulSoup/
 
 Beautiful Soup uses a pluggable XML or HTML parser to parse a
 (possibly invalid) document into a tree representation. Beautiful Soup
-provides provides methods and Pythonic idioms that make it easy to
-navigate, search, and modify the parse tree.
+provides methods and Pythonic idioms that make it easy to navigate,
+search, and modify the parse tree.
 
-Beautiful Soup works with Python 2.6 and up. It works better if lxml
+Beautiful Soup works with Python 2.7 and up. It works better if lxml
 and/or html5lib is installed.
 
 For more than you ever wanted to know about Beautiful Soup, see the
 documentation:
 http://www.crummy.com/software/BeautifulSoup/bs4/doc/
 
 """
 
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
 __author__ = "Leonard Richardson (leonardr@segfault.org)"
-__version__ = "4.3.2"
-__copyright__ = "Copyright (c) 2004-2013 Leonard Richardson"
+__version__ = "4.5.1"
+__copyright__ = "Copyright (c) 2004-2016 Leonard Richardson"
 __license__ = "MIT"
 
 __all__ = ['BeautifulSoup']
 
 import os
 import re
+import traceback
 import warnings
 
 from .builder import builder_registry, ParserRejectedMarkup
@@ -45,7 +50,7 @@ from .element import (
 
 # The very first thing we do is give a useful error if someone is
 # running this code under Python 3 without converting it.
-syntax_error = u'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
+'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work.'<>'You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
 
 class BeautifulSoup(Tag):
     """
@@ -77,8 +82,11 @@ class BeautifulSoup(Tag):
 
     ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
 
+    NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, change code that looks like this:\n\n BeautifulSoup([your markup])\n\nto this:\n\n BeautifulSoup([your markup], \"%(parser)s\")\n"
+
     def __init__(self, markup="", features=None, builder=None,
-                 parse_only=None, from_encoding=None, **kwargs):
+                 parse_only=None, from_encoding=None, exclude_encodings=None,
+                 **kwargs):
         """The Soup object is initialized as the 'root tag', and the
         provided markup (which can be a string or a file-like object)
         is fed into the underlying parser."""
@@ -114,9 +122,9 @@ class BeautifulSoup(Tag):
             del kwargs['isHTML']
             warnings.warn(
                 "BS4 does not respect the isHTML argument to the "
-                "BeautifulSoup constructor. You can pass in features='html' "
-                "or features='xml' to get a builder capable of handling "
-                "one or the other.")
+                "BeautifulSoup constructor. Suggest you use "
+                "features='lxml' for HTML and features='lxml-xml' for "
+                "XML.")
 
         def deprecated_argument(old_name, new_name):
             if old_name in kwargs:
@@ -134,12 +142,17 @@ class BeautifulSoup(Tag):
         from_encoding = from_encoding or deprecated_argument(
             "fromEncoding", "from_encoding")
 
+        if from_encoding and isinstance(markup, unicode):
+            warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.")
+            from_encoding = None
+
         if len(kwargs) > 0:
             arg = kwargs.keys().pop()
             raise TypeError(
                 "__init__() got an unexpected keyword argument '%s'" % arg)
 
         if builder is None:
+            original_features = features
             if isinstance(features, basestring):
                 features = [features]
             if features is None or len(features) == 0:
@@ -151,15 +164,35 @@ class BeautifulSoup(Tag):
                     "requested: %s. Do you need to install a parser library?"
                     % ",".join(features))
             builder = builder_class()
+            if not (original_features == builder.NAME or
+                    original_features in builder.ALTERNATE_NAMES):
+                if builder.is_xml:
+                    markup_type = "XML"
+                else:
+                    markup_type = "HTML"
+
+                caller = traceback.extract_stack()[0]
+                filename = caller[0]
+                line_number = caller[1]
+                warnings.warn(self.NO_PARSER_SPECIFIED_WARNING % dict(
+                    filename=filename,
+                    line_number=line_number,
+                    parser=builder.NAME,
+                    markup_type=markup_type))
 
         self.builder = builder
         self.is_xml = builder.is_xml
+        self.known_xml = self.is_xml
         self.builder.soup = self
 
         self.parse_only = parse_only
 
         if hasattr(markup, 'read'):        # It's a file-type object.
             markup = markup.read()
-        elif len(markup) <= 256:
+        elif len(markup) <= 256 and (
+                (isinstance(markup, bytes) and not b'<' in markup)
+                or (isinstance(markup, unicode) and not u'<' in markup)
+        ):
             # Print out warnings for a couple beginner problems
             # involving passing non-markup to Beautiful Soup.
             # Beautiful Soup will still parse the input as markup,
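The warning machinery above fires whenever the caller lets Beautiful Soup choose a builder. A minimal usage sketch (not part of this commit; assumes lxml is installed):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<p>Hello</p>")          # emits NO_PARSER_SPECIFIED_WARNING
    soup = BeautifulSoup("<p>Hello</p>", "lxml")  # explicit parser name, no warning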
@@ -178,19 +211,18 @@ class BeautifulSoup(Tag):
                 # system. Just let it go.
                 pass
             if is_file:
+                if isinstance(markup, unicode):
+                    markup = markup.encode("utf8")
                 warnings.warn(
-                    '"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
-            if markup[:5] == "http:" or markup[:6] == "https:":
-                # TODO: This is ugly but I couldn't get it to work in
-                # Python 3 otherwise.
-                if ((isinstance(markup, bytes) and not b' ' in markup)
-                    or (isinstance(markup, unicode) and not u' ' in markup)):
-                    warnings.warn(
-                        '"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)
+                    '"%s" looks like a filename, not markup. You should'
+                    'probably open this file and pass the filehandle into'
+                    'Beautiful Soup.' % markup)
+            self._check_markup_is_url(markup)
 
         for (self.markup, self.original_encoding, self.declared_html_encoding,
              self.contains_replacement_characters) in (
-                 self.builder.prepare_markup(markup, from_encoding)):
+                 self.builder.prepare_markup(
+                     markup, from_encoding, exclude_encodings=exclude_encodings)):
             self.reset()
             try:
                 self._feed()
@@ -203,6 +235,53 @@ class BeautifulSoup(Tag):
         self.markup = None
         self.builder.soup = None
 
+    def __copy__(self):
+        copy = type(self)(
+            self.encode('utf-8'), builder=self.builder, from_encoding='utf-8'
+        )
+
+        # Although we encoded the tree to UTF-8, that may not have
+        # been the encoding of the original markup. Set the copy's
+        # .original_encoding to reflect the original object's
+        # .original_encoding.
+        copy.original_encoding = self.original_encoding
+        return copy
+
+    def __getstate__(self):
+        # Frequently a tree builder can't be pickled.
+        d = dict(self.__dict__)
+        if 'builder' in d and not self.builder.picklable:
+            d['builder'] = None
+        return d
+
+    @staticmethod
+    def _check_markup_is_url(markup):
+        """
+        Check if markup looks like it's actually a url and raise a warning
+        if so. Markup can be unicode or str (py2) / bytes (py3).
+        """
+        if isinstance(markup, bytes):
+            space = b' '
+            cant_start_with = (b"http:", b"https:")
+        elif isinstance(markup, unicode):
+            space = u' '
+            cant_start_with = (u"http:", u"https:")
+        else:
+            return
+
+        if any(markup.startswith(prefix) for prefix in cant_start_with):
+            if not space in markup:
+                if isinstance(markup, bytes):
+                    decoded_markup = markup.decode('utf-8', 'replace')
+                else:
+                    decoded_markup = markup
+                warnings.warn(
+                    '"%s" looks like a URL. Beautiful Soup is not an'
+                    ' HTTP client. You should probably use an HTTP client like'
+                    ' requests to get the document behind the URL, and feed'
+                    ' that document to Beautiful Soup.' % decoded_markup
+                )
+
     def _feed(self):
         # Convert the document to Unicode.
         self.builder.reset()
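The new __copy__ and __getstate__ hooks make soups copyable and picklable even when the underlying builder is not. A minimal sketch (not part of this commit):

    import copy
    import pickle
    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<p>Hello</p>", "html.parser")
    clone = copy.copy(soup)                      # re-parses the UTF-8 encoding of the tree
    restored = pickle.loads(pickle.dumps(soup))  # an unpicklable builder is dropped, not serialized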
@@ -229,9 +308,7 @@ class BeautifulSoup(Tag):
 
     def new_string(self, s, subclass=NavigableString):
         """Create a new NavigableString associated with this soup."""
-        navigable = subclass(s)
-        navigable.setup()
-        return navigable
+        return subclass(s)
 
     def insert_before(self, successor):
         raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
@@ -290,14 +367,60 @@ class BeautifulSoup(Tag):
     def object_was_parsed(self, o, parent=None, most_recent_element=None):
         """Add an object to the parse tree."""
         parent = parent or self.currentTag
-        most_recent_element = most_recent_element or self._most_recent_element
-        o.setup(parent, most_recent_element)
+        previous_element = most_recent_element or self._most_recent_element
+
+        next_element = previous_sibling = next_sibling = None
+        if isinstance(o, Tag):
+            next_element = o.next_element
+            next_sibling = o.next_sibling
+            previous_sibling = o.previous_sibling
+            if not previous_element:
+                previous_element = o.previous_element
+
+        o.setup(parent, previous_element, next_element, previous_sibling, next_sibling)
 
         if most_recent_element is not None:
             most_recent_element.next_element = o
         self._most_recent_element = o
         parent.contents.append(o)
 
+        if parent.next_sibling:
+            # This node is being inserted into an element that has
+            # already been parsed. Deal with any dangling references.
+            index = len(parent.contents)-1
+            while index >= 0:
+                if parent.contents[index] is o:
+                    break
+                index -= 1
+            else:
+                raise ValueError(
+                    "Error building tree: supposedly %r was inserted "
+                    "into %r after the fact, but I don't see it!" % (
+                        o, parent
+                    )
+                )
+            if index == 0:
+                previous_element = parent
+                previous_sibling = None
+            else:
+                previous_element = previous_sibling = parent.contents[index-1]
+            if index == len(parent.contents)-1:
+                next_element = parent.next_sibling
+                next_sibling = None
+            else:
+                next_element = next_sibling = parent.contents[index+1]
+
+            o.previous_element = previous_element
+            if previous_element:
+                previous_element.next_element = o
+            o.next_element = next_element
+            if next_element:
+                next_element.previous_element = o
+            o.next_sibling = next_sibling
+            if next_sibling:
+                next_sibling.previous_sibling = o
+            o.previous_sibling = previous_sibling
+            if previous_sibling:
+                previous_sibling.next_sibling = o
+
     def _popToTag(self, name, nsprefix=None, inclusivePop=True):
         """Pops the tag stack up to and including the most recent
         instance of the given tag. If inclusivePop is false, pops the tag
@@ -325,7 +448,7 @@ class BeautifulSoup(Tag):
         """Push a start tag on to the stack.
 
         If this method returns None, the tag was rejected by the
-        SoupStrainer. You should proceed as if the tag had not occured
+        SoupStrainer. You should proceed as if the tag had not occurred
         in the document. For instance, if this was a self-closing tag,
         don't call handle_endtag.
         """
bs4/builder/__init__.py

@@ -1,9 +1,13 @@
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
 from collections import defaultdict
 import itertools
 import sys
 from bs4.element import (
     CharsetMetaAttributeValue,
     ContentMetaAttributeValue,
+    HTMLAwareEntitySubstitution,
     whitespace_re
     )
@@ -80,9 +84,12 @@ builder_registry = TreeBuilderRegistry()
 class TreeBuilder(object):
     """Turn a document into a Beautiful Soup object tree."""
 
+    NAME = "[Unknown tree builder]"
+    ALTERNATE_NAMES = []
     features = []
 
     is_xml = False
+    picklable = False
     preserve_whitespace_tags = set()
     empty_element_tags = None # A tag will be considered an empty-element
                               # tag when and only when it has no contents.
@@ -224,7 +231,7 @@ class HTMLTreeBuilder(TreeBuilder):
     Such as which tags are empty-element tags.
     """
 
-    preserve_whitespace_tags = set(['pre', 'textarea'])
+    preserve_whitespace_tags = HTMLAwareEntitySubstitution.preserve_whitespace_tags
     empty_element_tags = set(['br' , 'hr', 'input', 'img', 'meta',
                               'spacer', 'link', 'frame', 'base'])
bs4/builder/_html5lib.py

@@ -1,3 +1,6 @@
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
 __all__ = [
     'HTML5TreeBuilder',
     ]
@@ -9,7 +12,10 @@ from bs4.builder import (
     HTML_5,
     HTMLTreeBuilder,
     )
-from bs4.element import NamespacedAttribute
+from bs4.element import (
+    NamespacedAttribute,
+    whitespace_re,
+)
 import html5lib
 from html5lib.constants import namespaces
 from bs4.element import (
@@ -19,14 +25,32 @@ from bs4.element import (
     Tag,
     )
 
+try:
+    # Pre-0.99999999
+    from html5lib.treebuilders import _base as treebuilder_base
+    new_html5lib = False
+except ImportError, e:
+    # 0.99999999 and up
+    from html5lib.treebuilders import base as treebuilder_base
+    new_html5lib = True
+
 class HTML5TreeBuilder(HTMLTreeBuilder):
     """Use html5lib to build a tree."""
 
-    features = ['html5lib', PERMISSIVE, HTML_5, HTML]
+    NAME = "html5lib"
 
-    def prepare_markup(self, markup, user_specified_encoding):
+    features = [NAME, PERMISSIVE, HTML_5, HTML]
+
+    def prepare_markup(self, markup, user_specified_encoding,
+                       document_declared_encoding=None, exclude_encodings=None):
         # Store the user-specified encoding for use later on.
         self.user_specified_encoding = user_specified_encoding
+
+        # document_declared_encoding and exclude_encodings aren't used
+        # ATM because the html5lib TreeBuilder doesn't use
+        # UnicodeDammit.
+        if exclude_encodings:
+            warnings.warn("You provided a value for exclude_encoding, but the html5lib tree builder doesn't support exclude_encoding.")
         yield (markup, None, None, False)
 
     # These methods are defined by Beautiful Soup.
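The try/except above adapts to the html5lib 0.99999999 module rename, and prepare_markup only warns about exclude_encodings because html5lib does its own encoding detection. A minimal sketch (not part of this commit; assumes html5lib is installed):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<p>unclosed", "html5lib",
                         exclude_encodings=["utf-16"])  # triggers the warning above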
@@ -34,7 +58,14 @@ class HTML5TreeBuilder(HTMLTreeBuilder):
         if self.soup.parse_only is not None:
             warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
         parser = html5lib.HTMLParser(tree=self.create_treebuilder)
-        doc = parser.parse(markup, encoding=self.user_specified_encoding)
+
+        extra_kwargs = dict()
+        if not isinstance(markup, unicode):
+            if new_html5lib:
+                extra_kwargs['override_encoding'] = self.user_specified_encoding
+            else:
+                extra_kwargs['encoding'] = self.user_specified_encoding
+        doc = parser.parse(markup, **extra_kwargs)
 
         # Set the character encoding detected by the tokenizer.
         if isinstance(markup, unicode):
@@ -42,7 +73,13 @@ class HTML5TreeBuilder(HTMLTreeBuilder):
             # charEncoding to UTF-8 if it gets Unicode input.
             doc.original_encoding = None
         else:
-            doc.original_encoding = parser.tokenizer.stream.charEncoding[0]
+            original_encoding = parser.tokenizer.stream.charEncoding[0]
+            if not isinstance(original_encoding, basestring):
+                # In 0.99999999 and up, the encoding is an html5lib
+                # Encoding object. We want to use a string for compatibility
+                # with other tree builders.
+                original_encoding = original_encoding.name
+            doc.original_encoding = original_encoding
 
     def create_treebuilder(self, namespaceHTMLElements):
         self.underlying_builder = TreeBuilderForHtml5lib(
@@ -54,7 +91,7 @@ class HTML5TreeBuilder(HTMLTreeBuilder):
         return u'<html><head></head><body>%s</body></html>' % fragment
 
 
-class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
+class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
 
     def __init__(self, soup, namespaceHTMLElements):
         self.soup = soup
@@ -92,7 +129,7 @@ class TreeBuilderForHtml5lib(treebuilder_base.TreeBuilder):
         return self.soup
 
     def getFragment(self):
-        return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
+        return treebuilder_base.TreeBuilder.getFragment(self).element
 
 class AttrList(object):
     def __init__(self, element):
@@ -101,7 +138,16 @@ class AttrList(object):
     def __iter__(self):
         return list(self.attrs.items()).__iter__()
     def __setitem__(self, name, value):
-        "set attr", name, value
+        # If this attribute is a multi-valued attribute for this element,
+        # turn its value into a list.
+        list_attr = HTML5TreeBuilder.cdata_list_attributes
+        if (name in list_attr['*']
+            or (self.element.name in list_attr
+                and name in list_attr[self.element.name])):
+            # A node that is being cloned may have already undergone
+            # this procedure.
+            if not isinstance(value, list):
+                value = whitespace_re.split(value)
         self.element[name] = value
     def items(self):
         return list(self.attrs.items())
@@ -115,9 +161,9 @@ class AttrList(object):
         return name in list(self.attrs.keys())
 
 
-class Element(html5lib.treebuilders._base.Node):
+class Element(treebuilder_base.Node):
     def __init__(self, element, soup, namespace):
-        html5lib.treebuilders._base.Node.__init__(self, element.name)
+        treebuilder_base.Node.__init__(self, element.name)
         self.element = element
         self.soup = soup
         self.namespace = namespace
@@ -161,6 +207,12 @@ class Element(treebuilder_base.Node):
             # immediately after the parent, if it has no children.)
             if self.element.contents:
                 most_recent_element = self.element._last_descendant(False)
+            elif self.element.next_element is not None:
+                # Something from further ahead in the parse tree is
+                # being inserted into this earlier element. This is
+                # very annoying because it means an expensive search
+                # for the last element in the tree.
+                most_recent_element = self.soup._last_descendant()
             else:
                 most_recent_element = self.element
 
@@ -172,6 +224,7 @@ class Element(treebuilder_base.Node):
         return AttrList(self.element)
 
     def setAttributes(self, attributes):
+
         if attributes is not None and len(attributes) > 0:
 
             converted_attributes = []
@@ -218,6 +271,9 @@ class Element(treebuilder_base.Node):
 
     def reparentChildren(self, new_parent):
         """Move all of this tag's children into another tag."""
+        # print "MOVE", self.element.contents
+        # print "FROM", self.element
+        # print "TO", new_parent.element
         element = self.element
         new_parent_element = new_parent.element
         # Determine what this tag's next_element will be once all the children
@@ -236,17 +292,28 @@ class Element(treebuilder_base.Node):
         new_parents_last_descendant_next_element = new_parent_element.next_element
 
         to_append = element.contents
-        append_after = new_parent.element.contents
+        append_after = new_parent_element.contents
         if len(to_append) > 0:
             # Set the first child's previous_element and previous_sibling
             # to elements within the new parent
             first_child = to_append[0]
-            first_child.previous_element = new_parents_last_descendant
+            if new_parents_last_descendant:
+                first_child.previous_element = new_parents_last_descendant
+            else:
+                first_child.previous_element = new_parent_element
             first_child.previous_sibling = new_parents_last_child
+            if new_parents_last_descendant:
+                new_parents_last_descendant.next_element = first_child
+            else:
+                new_parent_element.next_element = first_child
+            if new_parents_last_child:
+                new_parents_last_child.next_sibling = first_child
 
             # Fix the last child's next_element and next_sibling
             last_child = to_append[-1]
             last_child.next_element = new_parents_last_descendant_next_element
+            if new_parents_last_descendant_next_element:
+                new_parents_last_descendant_next_element.previous_element = last_child
             last_child.next_sibling = None
 
             for child in to_append:
@@ -257,6 +324,10 @@ class Element(treebuilder_base.Node):
         element.contents = []
         element.next_element = final_next_element
 
+        # print "DONE WITH MOVE"
+        # print "FROM", self.element
+        # print "TO", new_parent_element
+
     def cloneNode(self):
         tag = self.soup.new_tag(self.element.name, self.namespace)
         node = Element(tag, self.soup, self.namespace)
@@ -277,7 +348,7 @@ class Element(treebuilder_base.Node):
 
 class TextNode(Element):
     def __init__(self, element, soup):
-        html5lib.treebuilders._base.Node.__init__(self, None)
+        treebuilder_base.Node.__init__(self, None)
         self.element = element
         self.soup = soup
bs4/builder/_htmlparser.py

@@ -1,13 +1,22 @@
 """Use the HTMLParser library to parse HTML files that aren't too bad."""
 
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
 __all__ = [
     'HTMLParserTreeBuilder',
     ]
 
-from HTMLParser import (
-    HTMLParser,
-    HTMLParseError,
-    )
+from HTMLParser import HTMLParser
+
+try:
+    from HTMLParser import HTMLParseError
+except ImportError, e:
+    # HTMLParseError is removed in Python 3.5. Since it can never be
+    # thrown in 3.5, we can just define our own class as a placeholder.
+    class HTMLParseError(Exception):
+        pass
+
 import sys
 import warnings
@@ -19,10 +28,10 @@ import warnings
 # At the end of this file, we monkeypatch HTMLParser so that
 # strict=True works well on Python 3.2.2.
 major, minor, release = sys.version_info[:3]
-CONSTRUCTOR_TAKES_STRICT = (
-    major > 3
-    or (major == 3 and minor > 2)
-    or (major == 3 and minor == 2 and release >= 3))
+CONSTRUCTOR_TAKES_STRICT = major == 3 and minor == 2 and release >= 3
+CONSTRUCTOR_STRICT_IS_DEPRECATED = major == 3 and minor == 3
+CONSTRUCTOR_TAKES_CONVERT_CHARREFS = major == 3 and minor >= 4
 
 
 from bs4.element import (
     CData,
@@ -63,7 +72,8 @@ class BeautifulSoupHTMLParser(HTMLParser):
 
     def handle_charref(self, name):
         # XXX workaround for a bug in HTMLParser. Remove this once
-        # it's fixed.
+        # it's fixed in all supported versions.
+        # http://bugs.python.org/issue13633
         if name.startswith('x'):
             real_name = int(name.lstrip('x'), 16)
         elif name.startswith('X'):
@@ -113,14 +123,6 @@ class BeautifulSoupHTMLParser(HTMLParser):
 
     def handle_pi(self, data):
         self.soup.endData()
-        if data.endswith("?") and data.lower().startswith("xml"):
-            # "An XHTML processing instruction using the trailing '?'
-            # will cause the '?' to be included in data." - HTMLParser
-            # docs.
-            #
-            # Strip the question mark so we don't end up with two
-            # question marks.
-            data = data[:-1]
         self.soup.handle_data(data)
         self.soup.endData(ProcessingInstruction)
@@ -128,15 +130,19 @@ class BeautifulSoupHTMLParser(HTMLParser):
 class HTMLParserTreeBuilder(HTMLTreeBuilder):
 
     is_xml = False
-    features = [HTML, STRICT, HTMLPARSER]
+    picklable = True
+    NAME = HTMLPARSER
+    features = [NAME, HTML, STRICT]
 
     def __init__(self, *args, **kwargs):
-        if CONSTRUCTOR_TAKES_STRICT:
+        if CONSTRUCTOR_TAKES_STRICT and not CONSTRUCTOR_STRICT_IS_DEPRECATED:
             kwargs['strict'] = False
+        if CONSTRUCTOR_TAKES_CONVERT_CHARREFS:
+            kwargs['convert_charrefs'] = False
         self.parser_args = (args, kwargs)
 
     def prepare_markup(self, markup, user_specified_encoding=None,
-                       document_declared_encoding=None):
+                       document_declared_encoding=None, exclude_encodings=None):
         """
         :return: A 4-tuple (markup, original encoding, encoding
         declared within markup, whether any characters had to be
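With NAME = HTMLPARSER the builder is registered under the stdlib name, and exclude_encodings is now forwarded to UnicodeDammit. A minimal sketch (not part of this commit):

    from bs4 import BeautifulSoup

    # b'\xe9' is not valid UTF-8, so detection falls back to another
    # candidate encoding; excluded candidates are never tried.
    soup = BeautifulSoup(b"<p>caf\xe9</p>", "html.parser",
                         exclude_encodings=["iso-8859-1"])
    soup.original_encoding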
@@ -147,7 +153,8 @@ class HTMLParserTreeBuilder(HTMLTreeBuilder):
             return
 
         try_encodings = [user_specified_encoding, document_declared_encoding]
-        dammit = UnicodeDammit(markup, try_encodings, is_html=True)
+        dammit = UnicodeDammit(markup, try_encodings, is_html=True,
+                               exclude_encodings=exclude_encodings)
         yield (dammit.markup, dammit.original_encoding,
                dammit.declared_html_encoding,
                dammit.contains_replacement_characters)
bs4/builder/_lxml.py

@@ -1,3 +1,5 @@
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
 __all__ = [
     'LXMLTreeBuilderForXML',
     'LXMLTreeBuilder',
@@ -7,7 +9,13 @@ from io import BytesIO
 from StringIO import StringIO
 import collections
 from lxml import etree
-from bs4.element import Comment, Doctype, NamespacedAttribute
+from bs4.element import (
+    Comment,
+    Doctype,
+    NamespacedAttribute,
+    ProcessingInstruction,
+    XMLProcessingInstruction,
+)
 from bs4.builder import (
     FAST,
     HTML,
@@ -24,9 +32,13 @@ class LXMLTreeBuilderForXML(TreeBuilder):
     DEFAULT_PARSER_CLASS = etree.XMLParser
 
     is_xml = True
+    processing_instruction_class = XMLProcessingInstruction
+
+    NAME = "lxml-xml"
+    ALTERNATE_NAMES = ["xml"]
 
     # Well, it's permissive by XML parser standards.
-    features = [LXML, XML, FAST, PERMISSIVE]
+    features = [NAME, LXML, XML, FAST, PERMISSIVE]
 
     CHUNK_SIZE = 512
 
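The NAME/ALTERNATE_NAMES pair makes "lxml-xml" the canonical feature string while the old "xml" keeps working. A minimal sketch (not part of this commit; assumes lxml is installed):

    from bs4 import BeautifulSoup

    doc = "<root><child/></root>"
    BeautifulSoup(doc, "lxml-xml")  # canonical name
    BeautifulSoup(doc, "xml")       # alternate name, same builder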
@@ -70,6 +82,7 @@ class LXMLTreeBuilderForXML(TreeBuilder):
         return (None, tag)
 
     def prepare_markup(self, markup, user_specified_encoding=None,
+                       exclude_encodings=None,
                        document_declared_encoding=None):
         """
         :yield: A series of 4-tuples.
@@ -78,6 +91,16 @@ class LXMLTreeBuilderForXML(TreeBuilder):
 
         Each 4-tuple represents a strategy for parsing the document.
         """
+        # Instead of using UnicodeDammit to convert the bytestring to
+        # Unicode using different encodings, use EncodingDetector to
+        # iterate over the encodings, and tell lxml to try to parse
+        # the document as each one in turn.
+        is_html = not self.is_xml
+        if is_html:
+            self.processing_instruction_class = ProcessingInstruction
+        else:
+            self.processing_instruction_class = XMLProcessingInstruction
+
         if isinstance(markup, unicode):
             # We were given Unicode. Maybe lxml can parse Unicode on
             # this system?
@@ -89,13 +112,9 @@ class LXMLTreeBuilderForXML(TreeBuilder):
             yield (markup.encode("utf8"), "utf8",
                    document_declared_encoding, False)
 
-        # Instead of using UnicodeDammit to convert the bytestring to
-        # Unicode using different encodings, use EncodingDetector to
-        # iterate over the encodings, and tell lxml to try to parse
-        # the document as each one in turn.
-        is_html = not self.is_xml
         try_encodings = [user_specified_encoding, document_declared_encoding]
-        detector = EncodingDetector(markup, try_encodings, is_html)
+        detector = EncodingDetector(
+            markup, try_encodings, is_html, exclude_encodings)
         for encoding in detector.encodings:
             yield (detector.markup, encoding, document_declared_encoding, False)
@@ -189,7 +208,9 @@ class LXMLTreeBuilderForXML(TreeBuilder):
         self.nsmaps.pop()
 
     def pi(self, target, data):
-        pass
+        self.soup.endData()
+        self.soup.handle_data(target + ' ' + data)
+        self.soup.endData(self.processing_instruction_class)
 
     def data(self, content):
         self.soup.handle_data(content)
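pi() previously discarded processing instructions; they are now fed through handle_data and typed via processing_instruction_class. A minimal sketch (not part of this commit; assumes lxml is installed):

    from bs4 import BeautifulSoup

    markup = '<?xml-stylesheet type="text/xsl" href="style.xsl"?><root/>'
    soup = BeautifulSoup(markup, "lxml-xml")
    soup.contents[0]  # an XMLProcessingInstruction, previously dropped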
@@ -212,8 +233,12 @@ class LXMLTreeBuilderForXML(TreeBuilder):
 
 class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
 
-    features = [LXML, HTML, FAST, PERMISSIVE]
+    NAME = LXML
+    ALTERNATE_NAMES = ["lxml-html"]
+
+    features = ALTERNATE_NAMES + [NAME, HTML, FAST, PERMISSIVE]
     is_xml = False
+    processing_instruction_class = ProcessingInstruction
 
     def default_parser(self, encoding):
         return etree.HTMLParser
bs4/dammit.py

@@ -3,9 +3,12 @@
 
 This library converts a bytestream to Unicode through any means
 necessary. It is heavily based on code from Mark Pilgrim's Universal
-Feed Parser. It works best on XML and XML, but it does not rewrite the
+Feed Parser. It works best on XML and HTML, but it does not rewrite the
 XML or HTML to reflect a new encoding; that's the tree builder's job.
 """
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+__license__ = "MIT"
 
 import codecs
 from htmlentitydefs import codepoint2name
@@ -212,8 +215,11 @@ class EncodingDetector:
 
     5. Windows-1252.
     """
-    def __init__(self, markup, override_encodings=None, is_html=False):
+    def __init__(self, markup, override_encodings=None, is_html=False,
+                 exclude_encodings=None):
         self.override_encodings = override_encodings or []
+        exclude_encodings = exclude_encodings or []
+        self.exclude_encodings = set([x.lower() for x in exclude_encodings])
         self.chardet_encoding = None
         self.is_html = is_html
         self.declared_encoding = None
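exclude_encodings lets callers veto specific candidate encodings during detection. A minimal sketch (not part of this commit):

    from bs4.dammit import EncodingDetector

    detector = EncodingDetector(b"<p>caf\xe9</p>", is_html=True,
                                exclude_encodings=["iso-8859-1"])
    list(detector.encodings)  # excluded names are filtered out by _usable()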
@@ -224,6 +230,8 @@ class EncodingDetector:
     def _usable(self, encoding, tried):
         if encoding is not None:
             encoding = encoding.lower()
+            if encoding in self.exclude_encodings:
+                return False
             if encoding not in tried:
                 tried.add(encoding)
                 return True
@@ -266,6 +274,9 @@ class EncodingDetector:
     def strip_byte_order_mark(cls, data):
         """If a byte-order mark is present, strip it and return the encoding it implies."""
         encoding = None
+        if isinstance(data, unicode):
+            # Unicode data cannot have a byte-order mark.
+            return data, encoding
         if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \
                and (data[2:4] != '\x00\x00'):
             encoding = 'utf-16be'
@@ -306,7 +317,7 @@ class EncodingDetector:
         declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos)
         if declared_encoding_match is not None:
             declared_encoding = declared_encoding_match.groups()[0].decode(
-                'ascii')
+                'ascii', 'replace')
         if declared_encoding:
             return declared_encoding.lower()
         return None
@@ -331,13 +342,14 @@ class UnicodeDammit:
     ]
 
     def __init__(self, markup, override_encodings=[],
-                 smart_quotes_to=None, is_html=False):
+                 smart_quotes_to=None, is_html=False, exclude_encodings=[]):
         self.smart_quotes_to = smart_quotes_to
         self.tried_encodings = []
         self.contains_replacement_characters = False
         self.is_html = is_html
-
-        self.detector = EncodingDetector(markup, override_encodings, is_html)
+        self.log = logging.getLogger(__name__)
+        self.detector = EncodingDetector(
+            markup, override_encodings, is_html, exclude_encodings)
 
         # Short-circuit if the data is in Unicode to begin with.
         if isinstance(markup, unicode) or markup == '':
@@ -365,9 +377,10 @@ class UnicodeDammit:
             if encoding != "ascii":
                 u = self._convert_from(encoding, "replace")
                 if u is not None:
-                    logging.warning(
+                    self.log.warning(
                         "Some characters could not be decoded, and were "
-                        "replaced with REPLACEMENT CHARACTER.")
+                        "replaced with REPLACEMENT CHARACTER."
+                    )
                     self.contains_replacement_characters = True
                     break
bs4/diagnose.py

@@ -1,4 +1,9 @@
 """Diagnostic functions, mainly for use when doing tech support."""
+
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+__license__ = "MIT"
+
 import cProfile
 from StringIO import StringIO
 from HTMLParser import HTMLParser
@@ -33,18 +38,28 @@ def diagnose(data):
 
     if 'lxml' in basic_parsers:
         basic_parsers.append(["lxml", "xml"])
-        from lxml import etree
-        print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))
+        try:
+            from lxml import etree
+            print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))
+        except ImportError, e:
+            print (
+                "lxml is not installed or couldn't be imported.")
 
     if 'html5lib' in basic_parsers:
-        import html5lib
-        print "Found html5lib version %s" % html5lib.__version__
+        try:
+            import html5lib
+            print "Found html5lib version %s" % html5lib.__version__
+        except ImportError, e:
+            print (
+                "html5lib is not installed or couldn't be imported.")
 
     if hasattr(data, 'read'):
         data = data.read()
     elif os.path.exists(data):
         print '"%s" looks like a filename. Reading data from the file.' % data
-        data = open(data).read()
+        with open(data) as fp:
+            data = fp.read()
     elif data.startswith("http:") or data.startswith("https:"):
         print '"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data
         print "You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup."
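diagnose() runs a document through every installed parser and prints the report the hunk above extends. A minimal sketch (not part of this commit):

    from bs4.diagnose import diagnose

    diagnose("<p>Some <b>bad<i>markup</p>")  # prints parser versions and each parser's output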
bs4/element.py

@@ -1,5 +1,10 @@
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+__license__ = "MIT"
+
 import collections
 import re
+import shlex
 import sys
 import warnings
 from bs4.dammit import EntitySubstitution
@@ -96,6 +101,8 @@ class HTMLAwareEntitySubstitution(EntitySubstitution):
 
     preformatted_tags = set(["pre"])
 
+    preserve_whitespace_tags = set(['pre', 'textarea'])
+
     @classmethod
     def _substitute_if_appropriate(cls, ns, f):
         if (isinstance(ns, NavigableString)
@@ -166,11 +173,19 @@ class PageElement(object):
 
         This is used when mapping a formatter name ("minimal") to an
         appropriate function (one that performs entity-substitution on
-        the contents of <script> and <style> tags, or not). It's
+        the contents of <script> and <style> tags, or not). It can be
         inefficient, but it should be called very rarely.
         """
+        if self.known_xml is not None:
+            # Most of the time we will have determined this when the
+            # document is parsed.
+            return self.known_xml
+
+        # Otherwise, it's likely that this element was created by
+        # direct invocation of the constructor from within the user's
+        # Python code.
         if self.parent is None:
-            # This is the top-level object. It should have .is_xml set
+            # This is the top-level object. It should have .known_xml set
             # from tree creation. If not, take a guess--BS is usually
             # used on HTML markup.
             return getattr(self, 'is_xml', False)
@@ -185,24 +200,40 @@ class PageElement(object):
         return self.HTML_FORMATTERS.get(
             name, HTMLAwareEntitySubstitution.substitute_xml)
 
-    def setup(self, parent=None, previous_element=None):
+    def setup(self, parent=None, previous_element=None, next_element=None,
+              previous_sibling=None, next_sibling=None):
         """Sets up the initial relations between this element and
         other elements."""
         self.parent = parent
+
         self.previous_element = previous_element
         if previous_element is not None:
             self.previous_element.next_element = self
-        self.next_element = None
-        self.previous_sibling = None
-        self.next_sibling = None
-        if self.parent is not None and self.parent.contents:
-            self.previous_sibling = self.parent.contents[-1]
 
+        self.next_element = next_element
+        if self.next_element:
+            self.next_element.previous_element = self
+
+        self.next_sibling = next_sibling
+        if self.next_sibling:
+            self.next_sibling.previous_sibling = self
+
+        if (not previous_sibling
+            and self.parent is not None and self.parent.contents):
+            previous_sibling = self.parent.contents[-1]
+
+        self.previous_sibling = previous_sibling
+        if previous_sibling:
+            self.previous_sibling.next_sibling = self
 
     nextSibling = _alias("next_sibling")  # BS3
     previousSibling = _alias("previous_sibling")  # BS3
 
     def replace_with(self, replace_with):
+        if not self.parent:
+            raise ValueError(
+                "Cannot replace one element with another when the"
+                "element to be replaced is not part of a tree.")
         if replace_with is self:
             return
         if replace_with is self.parent:
@@ -216,6 +247,10 @@ class PageElement(object):
 
     def unwrap(self):
         my_parent = self.parent
+        if not self.parent:
+            raise ValueError(
+                "Cannot replace an element with its contents when that"
+                "element is not part of a tree.")
         my_index = self.parent.index(self)
         self.extract()
         for child in reversed(self.contents[:]):
@@ -240,17 +275,20 @@ class PageElement(object):
         last_child = self._last_descendant()
         next_element = last_child.next_element
 
-        if self.previous_element is not None:
+        if (self.previous_element is not None and
+            self.previous_element is not next_element):
             self.previous_element.next_element = next_element
-        if next_element is not None:
+        if next_element is not None and next_element is not self.previous_element:
             next_element.previous_element = self.previous_element
         self.previous_element = None
         last_child.next_element = None
 
         self.parent = None
-        if self.previous_sibling is not None:
+        if (self.previous_sibling is not None
+            and self.previous_sibling is not self.next_sibling):
             self.previous_sibling.next_sibling = self.next_sibling
-        if self.next_sibling is not None:
+        if (self.next_sibling is not None
+            and self.next_sibling is not self.previous_sibling):
             self.next_sibling.previous_sibling = self.previous_sibling
         self.previous_sibling = self.next_sibling = None
         return self
@@ -263,13 +301,15 @@ class PageElement(object):
         last_child = self
         while isinstance(last_child, Tag) and last_child.contents:
             last_child = last_child.contents[-1]
-        if not accept_self and last_child == self:
+        if not accept_self and last_child is self:
             last_child = None
         return last_child
     # BS3: Not part of the API!
     _lastRecursiveChild = _last_descendant
 
     def insert(self, position, new_child):
+        if new_child is None:
+            raise ValueError("Cannot insert None into a tag.")
+        if new_child is self:
+            raise ValueError("Cannot insert a tag into itself.")
         if (isinstance(new_child, basestring)
@@ -478,6 +518,10 @@ class PageElement(object):
     def _find_all(self, name, attrs, text, limit, generator, **kwargs):
         "Iterates over a generator looking for things that match."
 
+        if text is None and 'string' in kwargs:
+            text = kwargs['string']
+            del kwargs['string']
+
         if isinstance(name, SoupStrainer):
             strainer = name
         else:
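_find_all now accepts string= as an alias for the older text= filter. A minimal sketch (not part of this commit):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<a>Home</a><a>Away</a>", "html.parser")
    soup.find_all("a", string="Home")  # same result as text="Home"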
@@ -548,17 +592,17 @@ class PageElement(object):
 
     # Methods for supporting CSS selectors.
 
-    tag_name_re = re.compile('^[a-z0-9]+$')
+    tag_name_re = re.compile('^[a-zA-Z0-9][-.a-zA-Z0-9:_]*$')
 
-    # /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
-    #   \---/  \---/\-------------/    \-------/
+    # /^([a-zA-Z0-9][-.a-zA-Z0-9:_]*)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
+    #   \---------------------------/  \---/\-------------/    \-------/
     #     |                              |         |               |
     #     |                              |         |           The value
     #     |                              |    ~,|,^,$,* or =
     #     |                          Attribute
     #    Tag
     attribselect_re = re.compile(
-        r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
+        r'^(?P<tag>[a-zA-Z0-9][-.a-zA-Z0-9:_]*)?\[(?P<attribute>[\w-]+)(?P<operator>[=~\|\^\$\*]?)' +
         r'=?"?(?P<value>[^\]"]*)"?\]$'
         )
@@ -605,7 +649,7 @@ class PageElement(object):
             return lambda el: el._attr_value_as_string(
                 attribute, '').startswith(value)
         elif operator == '$':
-            # string represenation of `attribute` ends with `value`
+            # string representation of `attribute` ends with `value`
             return lambda el: el._attr_value_as_string(
                 attribute, '').endswith(value)
         elif operator == '*':
@@ -645,6 +689,11 @@ class NavigableString(unicode, PageElement):
     PREFIX = ''
     SUFFIX = ''
 
+    # We can't tell just by looking at a string whether it's contained
+    # in an XML document or an HTML document.
+
+    known_xml = None
+
     def __new__(cls, value):
         """Create a new NavigableString.
 
@@ -654,11 +703,17 @@ class NavigableString(unicode, PageElement):
         how to handle non-ASCII characters.
         """
         if isinstance(value, unicode):
-            return unicode.__new__(cls, value)
-        return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
+            u = unicode.__new__(cls, value)
+        else:
+            u = unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
+        u.setup()
+        return u
 
     def __copy__(self):
-        return self
+        """A copy of a NavigableString has the same contents and class
+        as the original, but it is not connected to the parse tree.
+        """
+        return type(self)(self)
 
     def __getnewargs__(self):
         return (unicode(self),)
@@ -705,7 +760,13 @@ class CData(PreformattedString):
     SUFFIX = u']]>'
 
 class ProcessingInstruction(PreformattedString):
+    """A SGML processing instruction."""
 
+    PREFIX = u'<?'
+    SUFFIX = u'>'
+
+class XMLProcessingInstruction(ProcessingInstruction):
+    """An XML processing instruction."""
     PREFIX = u'<?'
     SUFFIX = u'?>'
@@ -716,8 +777,8 @@ class Comment(PreformattedString):
 
 
 class Declaration(PreformattedString):
-    PREFIX = u'<!'
-    SUFFIX = u'!>'
+    PREFIX = u'<?'
+    SUFFIX = u'?>'
 
 
 class Doctype(PreformattedString):
@@ -743,7 +804,8 @@ class Tag(PageElement):
     """Represents a found HTML tag with its attributes and contents."""
 
     def __init__(self, parser=None, builder=None, name=None, namespace=None,
-                 prefix=None, attrs=None, parent=None, previous=None):
+                 prefix=None, attrs=None, parent=None, previous=None,
+                 is_xml=None):
         "Basic constructor."
 
         if parser is None:
@@ -757,13 +819,31 @@ class Tag(PageElement):
         self.name = name
         self.namespace = namespace
         self.prefix = prefix
+        if builder is not None:
+            preserve_whitespace_tags = builder.preserve_whitespace_tags
+        else:
+            if is_xml:
+                preserve_whitespace_tags = []
+            else:
+                preserve_whitespace_tags = HTMLAwareEntitySubstitution.preserve_whitespace_tags
+        self.preserve_whitespace_tags = preserve_whitespace_tags
         if attrs is None:
             attrs = {}
-        elif attrs and builder.cdata_list_attributes:
-            attrs = builder._replace_cdata_list_attribute_values(
-                self.name, attrs)
+        elif attrs:
+            if builder is not None and builder.cdata_list_attributes:
+                attrs = builder._replace_cdata_list_attribute_values(
+                    self.name, attrs)
+            else:
+                attrs = dict(attrs)
         else:
             attrs = dict(attrs)
+        # If possible, determine ahead of time whether this tag is an
+        # XML tag.
+        if builder:
+            self.known_xml = builder.is_xml
+        else:
+            self.known_xml = is_xml
         self.attrs = attrs
         self.contents = []
         self.setup(parent, previous)
@@ -778,6 +858,18 @@ class Tag(PageElement):
 
     parserClass = _alias("parser_class")  # BS3
 
+    def __copy__(self):
+        """A copy of a Tag is a new Tag, unconnected to the parse tree.
+        Its contents are a copy of the old Tag's contents.
+        """
+        clone = type(self)(None, self.builder, self.name, self.namespace,
+                           self.nsprefix, self.attrs, is_xml=self._is_xml)
+        for attr in ('can_be_empty_element', 'hidden'):
+            setattr(clone, attr, getattr(self, attr))
+        for child in self.contents:
+            clone.append(child.__copy__())
+        return clone
+
     @property
     def is_empty_element(self):
         """Is this tag an empty-element tag? (aka a self-closing tag)
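Tag.__copy__ produces a recursive, detached copy rather than a second reference into the same tree. A minimal sketch (not part of this commit):

    import copy
    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<b class="x">bold</b>', "html.parser")
    clone = copy.copy(soup.b)
    clone.parent is None  # True: the clone is unconnected to the parse tree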
@@ -944,7 +1036,7 @@ class Tag(PageElement):
                     tag_name, tag_name))
             return self.find(tag_name)
         # We special case contents to avoid recursion.
-        elif not tag.startswith("__") and not tag=="contents":
+        elif not tag.startswith("__") and not tag == "contents":
             return self.find(tag)
         raise AttributeError(
             "'%s' object has no attribute '%s'" % (self.__class__, tag))
@@ -971,14 +1063,24 @@ class Tag(PageElement):
         as defined in __eq__."""
         return not self == other
 
-    def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
+    def __repr__(self, encoding="unicode-escape"):
         """Renders this tag as a string."""
-        return self.encode(encoding)
+        if PY3K:
+            # "The return value must be a string object", i.e. Unicode
+            return self.decode()
+        else:
+            # "The return value must be a string object", i.e. a bytestring.
+            # By convention, the return value of __repr__ should also be
+            # an ASCII string.
+            return self.encode(encoding)
 
     def __unicode__(self):
         return self.decode()
 
     def __str__(self):
-        return self.encode()
+        if PY3K:
+            return self.decode()
+        else:
+            return self.encode()
 
     if PY3K:
@@ -994,10 +1096,11 @@ class Tag(PageElement):
 
     def _should_pretty_print(self, indent_level):
         """Should this tag be pretty-printed?"""
+
         return (
-            indent_level is not None and
-            (self.name not in HTMLAwareEntitySubstitution.preformatted_tags
-             or self._is_xml))
+            indent_level is not None
+            and self.name not in self.preserve_whitespace_tags
+        )
 
     def decode(self, indent_level=None,
                eventual_encoding=DEFAULT_OUTPUT_ENCODING,
@@ -1103,12 +1206,18 @@ class Tag(PageElement):
                formatter="minimal"):
         """Renders the contents of this tag as a Unicode string.
 
+        :param indent_level: Each line of the rendering will be
+           indented this many spaces.
+
         :param eventual_encoding: The tag is destined to be
            encoded into this encoding. This method is _not_
            responsible for performing that encoding. This information
            is passed in so that it can be substituted in if the
            document contains a <META> tag that mentions the document's
           encoding.
+
+        :param formatter: The output formatter responsible for converting
+           entities to Unicode characters.
         """
         # First off, turn a string formatter into a function. This
         # will stop the lookup from happening over and over again.
@@ -1137,7 +1246,17 @@ class Tag(PageElement):
     def encode_contents(
         self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
         formatter="minimal"):
-        """Renders the contents of this tag as a bytestring."""
+        """Renders the contents of this tag as a bytestring.
+
+        :param indent_level: Each line of the rendering will be
+           indented this many spaces.
+
+        :param eventual_encoding: The bytestring will be in this encoding.
+
+        :param formatter: The output formatter responsible for converting
+           entities to Unicode characters.
+        """
+
         contents = self.decode_contents(indent_level, encoding, formatter)
         return contents.encode(encoding)
@@ -1201,26 +1320,57 @@ class Tag(PageElement):
 
     _selector_combinators = ['>', '+', '~']
     _select_debug = False
-    def select(self, selector, _candidate_generator=None):
-        """Perform a CSS selection operation on the current element."""
-        tokens = selector.split()
+    quoted_colon = re.compile('"[^"]*:[^"]*"')
+
+    def select_one(self, selector):
+        """Perform a CSS selection operation on the current element."""
+        value = self.select(selector, limit=1)
+        if value:
+            return value[0]
+        return None
+
+    def select(self, selector, _candidate_generator=None, limit=None):
+        """Perform a CSS selection operation on the current element."""
+
+        # Handle grouping selectors if ',' exists, ie: p,a
+        if ',' in selector:
+            context = []
+            for partial_selector in selector.split(','):
+                partial_selector = partial_selector.strip()
+                if partial_selector == '':
+                    raise ValueError('Invalid group selection syntax: %s' % selector)
+                candidates = self.select(partial_selector, limit=limit)
+                for candidate in candidates:
+                    if candidate not in context:
+                        context.append(candidate)
+
+                if limit and len(context) >= limit:
+                    break
+            return context
+        tokens = shlex.split(selector)
         current_context = [self]
 
         if tokens[-1] in self._selector_combinators:
             raise ValueError(
                 'Final combinator "%s" is missing an argument.' % tokens[-1])
 
         if self._select_debug:
             print 'Running CSS selector "%s"' % selector
 
         for index, token in enumerate(tokens):
-            if self._select_debug:
-                print ' Considering token "%s"' % token
-            recursive_candidate_generator = None
-            tag_name = None
+            new_context = []
+            new_context_ids = set([])
+
+            if tokens[index-1] in self._selector_combinators:
+                # This token was consumed by the previous combinator. Skip it.
+                if self._select_debug:
+                    print '  Token was consumed by the previous combinator.'
+                continue
+
+            if self._select_debug:
+                print ' Considering token "%s"' % token
+            recursive_candidate_generator = None
+            tag_name = None
 
             # Each operation corresponds to a checker function, a rule
             # for determining whether a candidate matches the
             # selector. Candidates are generated by the active
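select_one(), grouped selectors, and the limit argument are the user-visible additions in this hunk. A minimal sketch (not part of this commit):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<p class="a">1</p><div>2</div>', "html.parser")
    soup.select_one("p.a")          # first match, or None
    soup.select("p, div", limit=2)  # grouped selectors with a result cap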
@@ -1248,7 +1398,7 @@ class Tag(PageElement):
                     return classes.issubset(candidate.get('class', []))
                 checker = classes_match
 
-            elif ':' in token:
+            elif ':' in token and not self.quoted_colon.search(token):
                 # Pseudo-class
                 tag_name, pseudo = token.split(':', 1)
                 if tag_name == '':
@@ -1256,7 +1406,10 @@ class Tag(PageElement):
                         "A pseudo-class must be prefixed with a tag name.")
                 pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
                 found = []
-                if pseudo_attributes is not None:
+                if pseudo_attributes is None:
+                    pseudo_type = pseudo
+                    pseudo_value = None
+                else:
                     pseudo_type, pseudo_value = pseudo_attributes.groups()
                 if pseudo_type == 'nth-of-type':
                     try:
@@ -1276,10 +1429,7 @@ class Tag(PageElement):
                             self.count += 1
                             if self.count == self.destination:
                                 return True
-                            if self.count > self.destination:
-                                # Stop the generator that's sending us
-                                # these things.
-                                raise StopIteration()
                             else:
                                 return False
                     checker = Counter(pseudo_value).nth_child_of_type
                 else:
@@ -1311,7 +1461,6 @@ class Tag(PageElement):
             else:
                 raise ValueError(
                     'Unsupported or invalid CSS selector: "%s"' % token)
-
             if recursive_candidate_generator:
                 # This happens when the selector looks like "> foo".
                 #
@@ -1361,8 +1510,7 @@ class Tag(PageElement):
             else:
                 _use_candidate_generator = _candidate_generator
 
-            new_context = []
-            new_context_ids = set([])
+            count = 0
             for tag in current_context:
                 if self._select_debug:
                     print "    Running candidate generator on %s %s" % (
@@ -1391,6 +1539,8 @@ class Tag(PageElement):
                         print "     FAILURE %s %s" % (candidate.name, repr(candidate.attrs))
 
             current_context = new_context
+            if limit and len(current_context) >= limit:
+                current_context = current_context[:limit]
 
         if self._select_debug:
             print "Final verdict:"
@@ -1554,20 +1704,14 @@ class SoupStrainer(object):
         if isinstance(markup, list) or isinstance(markup, tuple):
             # This should only happen when searching a multi-valued attribute
             # like 'class'.
-            if (isinstance(match_against, unicode)
-                and ' ' in match_against):
-                # A bit of a special case. If they try to match "foo
-                # bar" on a multivalue attribute's value, only accept
-                # the literal value "foo bar"
-                #
-                # XXX This is going to be pretty slow because we keep
-                # splitting match_against. But it shouldn't come up
-                # too often.
-                return (whitespace_re.split(match_against) == markup)
-            else:
-                for item in markup:
-                    if self._matches(item, match_against):
-                        return True
+            for item in markup:
+                if self._matches(item, match_against):
+                    return True
+            # We didn't match any particular value of the multivalue
+            # attribute, but maybe we match the attribute value when
+            # considered as a string.
+            if self._matches(' '.join(markup), match_against):
+                return True
             return False
 
         if match_against is True:
bs4/testing.py

@@ -1,5 +1,10 @@
 """Helper classes for tests."""
 
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+__license__ = "MIT"
+
+import pickle
 import copy
 import functools
 import unittest
@@ -43,6 +48,16 @@ class SoupTest(unittest.TestCase):
 
         self.assertEqual(obj.decode(), self.document_for(compare_parsed_to))
 
+    def assertConnectedness(self, element):
+        """Ensure that next_element and previous_element are properly
+        set for all descendants of the given element.
+        """
+        earlier = None
+        for e in element.descendants:
+            if earlier:
+                self.assertEqual(e, earlier.next_element)
+                self.assertEqual(earlier, e.previous_element)
+            earlier = e
 
 class HTMLTreeBuilderSmokeTest(object):
 
@@ -54,6 +69,15 @@ class HTMLTreeBuilderSmokeTest(object):
     markup in these tests, there's not much room for interpretation.
     """
 
+    def test_pickle_and_unpickle_identity(self):
+        # Pickling a tree, then unpickling it, yields a tree identical
+        # to the original.
+        tree = self.soup("<a><b>foo</a>")
+        dumped = pickle.dumps(tree, 2)
+        loaded = pickle.loads(dumped)
+        self.assertEqual(loaded.__class__, BeautifulSoup)
+        self.assertEqual(loaded.decode(), tree.decode())
+
     def assertDoctypeHandled(self, doctype_fragment):
         """Assert that a given doctype string is handled correctly."""
         doctype_str, soup = self._document_with_doctype(doctype_fragment)
@ -114,6 +138,19 @@ class HTMLTreeBuilderSmokeTest(object):
|
|||
soup.encode("utf-8").replace(b"\n", b""),
|
||||
markup.replace(b"\n", b""))
|
||||
|
||||
def test_processing_instruction(self):
|
||||
# We test both Unicode and bytestring to verify that
|
||||
# process_markup correctly sets processing_instruction_class
|
||||
# even when the markup is already Unicode and there is no
|
||||
# need to process anything.
|
||||
markup = u"""<?PITarget PIContent?>"""
|
||||
soup = self.soup(markup)
|
||||
self.assertEqual(markup, soup.decode())
|
||||
|
||||
markup = b"""<?PITarget PIContent?>"""
|
||||
soup = self.soup(markup)
|
||||
self.assertEqual(markup, soup.encode("utf8"))
|
||||
|
||||
def test_deepcopy(self):
|
||||
"""Make sure you can copy the tree builder.
|
||||
|
||||
|
@ -155,6 +192,23 @@ class HTMLTreeBuilderSmokeTest(object):
|
|||
def test_nested_formatting_elements(self):
|
||||
self.assertSoupEquals("<em><em></em></em>")
|
||||
|
||||
def test_double_head(self):
|
||||
html = '''<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<title>Ordinary HEAD element test</title>
|
||||
</head>
|
||||
<script type="text/javascript">
|
||||
alert("Help!");
|
||||
</script>
|
||||
<body>
|
||||
Hello, world!
|
||||
</body>
|
||||
</html>
|
||||
'''
|
||||
soup = self.soup(html)
|
||||
self.assertEqual("text/javascript", soup.find('script')['type'])
|
||||
|
||||
def test_comment(self):
|
||||
# Comments are represented as Comment objects.
|
||||
markup = "<p>foo<!--foobar-->baz</p>"
|
||||
|
@ -171,9 +225,22 @@ class HTMLTreeBuilderSmokeTest(object):
|
|||
self.assertEqual(comment, baz.previous_element)
|
||||
|
||||
def test_preserved_whitespace_in_pre_and_textarea(self):
|
||||
"""Whitespace must be preserved in <pre> and <textarea> tags."""
|
||||
self.assertSoupEquals("<pre> </pre>")
|
||||
self.assertSoupEquals("<textarea> woo </textarea>")
|
||||
"""Whitespace must be preserved in <pre> and <textarea> tags,
|
||||
even if that would mean not prettifying the markup.
|
||||
"""
|
||||
pre_markup = "<pre> </pre>"
|
||||
textarea_markup = "<textarea> woo\nwoo </textarea>"
|
||||
self.assertSoupEquals(pre_markup)
|
||||
self.assertSoupEquals(textarea_markup)
|
||||
|
||||
soup = self.soup(pre_markup)
|
||||
self.assertEqual(soup.pre.prettify(), pre_markup)
|
||||
|
||||
soup = self.soup(textarea_markup)
|
||||
self.assertEqual(soup.textarea.prettify(), textarea_markup)
|
||||
|
||||
soup = self.soup("<textarea></textarea>")
|
||||
self.assertEqual(soup.textarea.prettify(), "<textarea></textarea>")
|
||||
|
||||
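The guarantee tested above is easy to see directly through prettify; a quick sketch (our own markup):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<div><pre> keep   this </pre></div>", "html.parser")
    # prettify() re-indents the surrounding tree, but <pre> content stays verbatim.
    print(soup.prettify())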
    def test_nested_inline_elements(self):
        """Inline elements can be nested indefinitely."""

@@ -221,6 +288,14 @@ class HTMLTreeBuilderSmokeTest(object):
        soup = self.soup(markup)
        self.assertEqual(["css"], soup.div.div['class'])

+   def test_multivalued_attribute_on_html(self):
+       # html5lib uses a different API to set the attributes of the
+       # <html> tag. This has caused problems with multivalued
+       # attributes.
+       markup = '<html class="a b"></html>'
+       soup = self.soup(markup)
+       self.assertEqual(["a", "b"], soup.html['class'])

    def test_angle_brackets_in_attribute_values_are_escaped(self):
        self.assertSoupEquals('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>')
@@ -253,6 +328,35 @@ class HTMLTreeBuilderSmokeTest(object):
        soup = self.soup("<html><h2>\nfoo</h2><p></p></html>")
        self.assertEqual("p", soup.h2.string.next_element.name)
        self.assertEqual("p", soup.p.name)
+       self.assertConnectedness(soup)

+   def test_head_tag_between_head_and_body(self):
+       "Prevent recurrence of a bug in the html5lib treebuilder."
+       content = """<html><head></head>
+<link></link>
+<body>foo</body>
+</html>
+"""
+       soup = self.soup(content)
+       self.assertNotEqual(None, soup.html.body)
+       self.assertConnectedness(soup)

+   def test_multiple_copies_of_a_tag(self):
+       "Prevent recurrence of a bug in the html5lib treebuilder."
+       content = """<!DOCTYPE html>
+<html>
+<body>
+<article id="a" >
+<div><a href="1"></div>
+<footer>
+<a href="2"></a>
+</footer>
+</article>
+</body>
+</html>
+"""
+       soup = self.soup(content)
+       self.assertConnectedness(soup.article)

    def test_basic_namespaces(self):
        """Parsers don't need to *understand* namespaces, but at the
@@ -399,7 +503,9 @@ class HTMLTreeBuilderSmokeTest(object):
        hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
        soup = self.soup(
            hebrew_document, from_encoding="iso8859-8")
-       self.assertEqual(soup.original_encoding, 'iso8859-8')
+       # Some tree builders call it iso8859-8, others call it iso-8859-8.
+       # That's not a difference we really care about.
+       assert soup.original_encoding in ('iso8859-8', 'iso-8859-8')
        self.assertEqual(
            soup.encode('utf-8'),
            hebrew_document.decode("iso8859-8").encode("utf-8"))
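from_encoding, used above, short-circuits detection when the caller already knows the encoding; a minimal sketch (our own bytes and parser choice):

    from bs4 import BeautifulSoup

    data = b"<p>\xed\xe5\xec\xf9</p>"  # ISO-8859-8 bytes
    soup = BeautifulSoup(data, "html.parser", from_encoding="iso8859-8")
    print(soup.original_encoding)  # iso8859-8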
@@ -463,11 +569,30 @@ class HTMLTreeBuilderSmokeTest(object):

class XMLTreeBuilderSmokeTest(object):

+   def test_pickle_and_unpickle_identity(self):
+       # Pickling a tree, then unpickling it, yields a tree identical
+       # to the original.
+       tree = self.soup("<a><b>foo</a>")
+       dumped = pickle.dumps(tree, 2)
+       loaded = pickle.loads(dumped)
+       self.assertEqual(loaded.__class__, BeautifulSoup)
+       self.assertEqual(loaded.decode(), tree.decode())

    def test_docstring_generated(self):
        soup = self.soup("<root/>")
        self.assertEqual(
            soup.encode(), b'<?xml version="1.0" encoding="utf-8"?>\n<root/>')

+   def test_xml_declaration(self):
+       markup = b"""<?xml version="1.0" encoding="utf8"?>\n<foo/>"""
+       soup = self.soup(markup)
+       self.assertEqual(markup, soup.encode("utf8"))

+   def test_processing_instruction(self):
+       markup = b"""<?xml version="1.0" encoding="utf8"?>\n<?PITarget PIContent?>"""
+       soup = self.soup(markup)
+       self.assertEqual(markup, soup.encode("utf8"))

    def test_real_xhtml_document(self):
        """A real XHTML document should come out *exactly* the same as it went in."""
        markup = b"""<?xml version="1.0" encoding="utf-8"?>

@@ -485,7 +610,7 @@ class XMLTreeBuilderSmokeTest(object):
        <script type="text/javascript">
        </script>
        """
-       soup = BeautifulSoup(doc, "xml")
+       soup = BeautifulSoup(doc, "lxml-xml")
        # lxml would have stripped this while parsing, but we can add
        # it later.
        soup.script.string = 'console.log("< < hey > > ");'
@@ -1,6 +1,7 @@
"""Tests of the builder registry."""

import unittest
+import warnings

from bs4 import BeautifulSoup
from bs4.builder import (

@@ -67,6 +68,11 @@ class BuiltInRegistryTest(unittest.TestCase):
        HTMLParserTreeBuilder)

    def test_beautifulsoup_constructor_does_lookup(self):

+       with warnings.catch_warnings(record=True) as w:
+           # This will create a warning about not explicitly
+           # specifying a parser, but we'll ignore it.

            # You can pass in a string.
            BeautifulSoup("", features="html")
            # Or a list of strings.
@@ -83,3 +83,27 @@ class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest):
        soup = self.soup(markup)
        self.assertEqual(u"<body><p><em>foo</em></p><em>\n</em><p><em>bar<a></a></em></p>\n</body>", soup.body.decode())
        self.assertEqual(2, len(soup.find_all('p')))

+   def test_reparented_markup_containing_identical_whitespace_nodes(self):
+       """Verify that we keep the two whitespace nodes in this
+       document distinct when reparenting the adjacent <tbody> tags.
+       """
+       markup = '<table> <tbody><tbody><ims></tbody> </table>'
+       soup = self.soup(markup)
+       space1, space2 = soup.find_all(string=' ')
+       tbody1, tbody2 = soup.find_all('tbody')
+       assert space1.next_element is tbody1
+       assert tbody2.next_element is space2

+   def test_processing_instruction(self):
+       """Processing instructions become comments."""
+       markup = b"""<?PITarget PIContent?>"""
+       soup = self.soup(markup)
+       assert str(soup).startswith("<!--?PITarget PIContent?-->")

+   def test_cloned_multivalue_node(self):
+       markup = b"""<a class="my_class"><p></a>"""
+       soup = self.soup(markup)
+       a1, a2 = soup.find_all('a')
+       self.assertEqual(a1, a2)
+       assert a1 is not a2
@@ -1,6 +1,8 @@
"""Tests to ensure that the html.parser tree builder generates good
trees."""

+from pdb import set_trace
+import pickle
from bs4.testing import SoupTest, HTMLTreeBuilderSmokeTest
from bs4.builder import HTMLParserTreeBuilder

@@ -17,3 +19,14 @@ class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
    def test_namespaced_public_doctype(self):
        # html.parser can't handle namespaced doctypes, so skip this one.
        pass

+   def test_builder_is_pickled(self):
+       """Unlike most tree builders, HTMLParserTreeBuilder will
+       be restored after pickling.
+       """
+       tree = self.soup("<a><b>foo</a>")
+       dumped = pickle.dumps(tree, 2)
+       loaded = pickle.loads(dumped)
+       self.assertTrue(isinstance(loaded.builder, type(tree.builder)))
@@ -65,21 +65,6 @@ class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
            self.assertEqual(u"<b/>", unicode(soup.b))
            self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message))

-   def test_real_xhtml_document(self):
-       """lxml strips the XML definition from an XHTML doc, which is fine."""
-       markup = b"""<?xml version="1.0" encoding="utf-8"?>
-<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
-<html xmlns="http://www.w3.org/1999/xhtml">
-<head><title>Hello.</title></head>
-<body>Goodbye.</body>
-</html>"""
-       soup = self.soup(markup)
-       self.assertEqual(
-           soup.encode("utf-8").replace(b"\n", b''),
-           markup.replace(b'\n', b'').replace(
-               b'<?xml version="1.0" encoding="utf-8"?>', b''))

@skipIf(
    not LXML_PRESENT,
    "lxml seems not to be present, not testing its XML tree builder.")
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
"""Tests of Beautiful Soup as a whole."""

+from pdb import set_trace
import logging
import unittest
import sys

@@ -20,6 +21,7 @@ import bs4.dammit
from bs4.dammit import (
    EntitySubstitution,
    UnicodeDammit,
+   EncodingDetector,
)
from bs4.testing import (
    SoupTest,

@@ -33,7 +35,6 @@ try:
except ImportError, e:
    LXML_PRESENT = False

-PYTHON_2_PRE_2_7 = (sys.version_info < (2,7))
PYTHON_3_PRE_3_2 = (sys.version_info[0] == 3 and sys.version_info < (3,2))

class TestConstructor(SoupTest):
@@ -48,8 +49,34 @@ class TestConstructor(SoupTest):
        soup = self.soup(data)
        self.assertEqual(u"foo\0bar", soup.h1.string)

+   def test_exclude_encodings(self):
+       utf8_data = u"Räksmörgås".encode("utf-8")
+       soup = self.soup(utf8_data, exclude_encodings=["utf-8"])
+       self.assertEqual("windows-1252", soup.original_encoding)
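The new exclude_encodings argument rules candidate encodings out of detection; a minimal sketch, assuming the constructor keyword is used directly rather than through the test harness:

    # -*- coding: utf-8 -*-
    from bs4 import BeautifulSoup

    utf8_data = u"Räksmörgås".encode("utf-8")
    # With the true encoding excluded, detection falls back to windows-1252.
    soup = BeautifulSoup(utf8_data, "html.parser", exclude_encodings=["utf-8"])
    print(soup.original_encoding)  # windows-1252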
-class TestDeprecatedConstructorArguments(SoupTest):
+class TestWarnings(SoupTest):

+   def _assert_no_parser_specified(self, s, is_there=True):
+       v = s.startswith(BeautifulSoup.NO_PARSER_SPECIFIED_WARNING[:80])
+       self.assertTrue(v)

+   def test_warning_if_no_parser_specified(self):
+       with warnings.catch_warnings(record=True) as w:
+           soup = self.soup("<a><b></b></a>")
+       msg = str(w[0].message)
+       self._assert_no_parser_specified(msg)

+   def test_warning_if_parser_specified_too_vague(self):
+       with warnings.catch_warnings(record=True) as w:
+           soup = self.soup("<a><b></b></a>", "html")
+       msg = str(w[0].message)
+       self._assert_no_parser_specified(msg)

+   def test_no_warning_if_explicit_parser_specified(self):
+       with warnings.catch_warnings(record=True) as w:
+           soup = self.soup("<a><b></b></a>", "html.parser")
+       self.assertEqual([], w)
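The warning those tests pin down goes away once a concrete parser is named; a one-line sketch:

    from bs4 import BeautifulSoup

    # Naming the parser explicitly avoids the "no parser specified" warning
    # and keeps behaviour identical across systems.
    soup = BeautifulSoup("<a></a>", "html.parser")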
    def test_parseOnlyThese_renamed_to_parse_only(self):
        with warnings.catch_warnings(record=True) as w:

@@ -90,15 +117,34 @@ class TestWarnings(SoupTest):
            soup = self.soup(filename)
        self.assertEqual(0, len(w))

-   def test_url_warning(self):
-       with warnings.catch_warnings(record=True) as w:
-           soup = self.soup("http://www.crummy.com/")
-       msg = str(w[0].message)
-       self.assertTrue("looks like a URL" in msg)

+   def test_url_warning_with_bytes_url(self):
+       with warnings.catch_warnings(record=True) as warning_list:
+           soup = self.soup(b"http://www.crummybytes.com/")
+       # Be aware this isn't the only warning that can be raised during
+       # execution.
+       self.assertTrue(any("looks like a URL" in str(w.message)
+           for w in warning_list))

+   def test_url_warning_with_unicode_url(self):
+       with warnings.catch_warnings(record=True) as warning_list:
+           # note - this url must differ from the bytes one otherwise
+           # python's warnings system swallows the second warning
+           soup = self.soup(u"http://www.crummyunicode.com/")
+       self.assertTrue(any("looks like a URL" in str(w.message)
+           for w in warning_list))

+   def test_url_warning_with_bytes_and_space(self):
+       with warnings.catch_warnings(record=True) as warning_list:
+           soup = self.soup(b"http://www.crummybytes.com/ is great")
+       self.assertFalse(any("looks like a URL" in str(w.message)
+           for w in warning_list))

+   def test_url_warning_with_unicode_and_space(self):
+       with warnings.catch_warnings(record=True) as warning_list:
+           soup = self.soup(u"http://www.crummyuncode.com/ is great")
+       self.assertFalse(any("looks like a URL" in str(w.message)
+           for w in warning_list))

-       with warnings.catch_warnings(record=True) as w:
-           soup = self.soup("http://www.crummy.com/ is great")
-       self.assertEqual(0, len(w))

class TestSelectiveParsing(SoupTest):
@@ -232,7 +278,7 @@ class TestEncodingConversion(SoupTest):
        self.assertEqual(soup_from_unicode.encode('utf-8'), self.utf8_data)

    @skipIf(
-       PYTHON_2_PRE_2_7 or PYTHON_3_PRE_3_2,
+       PYTHON_3_PRE_3_2,
        "Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.")
    def test_attribute_name_containing_unicode_characters(self):
        markup = u'<div><a \N{SNOWMAN}="snowman"></a></div>'

@@ -271,10 +317,11 @@ class TestUnicodeDammit(unittest.TestCase):
            dammit.unicode_markup, """<foo>''""</foo>""")

    def test_detect_utf8(self):
-       utf8 = b"\xc3\xa9"
+       utf8 = b"Sacr\xc3\xa9 bleu! \xe2\x98\x83"
        dammit = UnicodeDammit(utf8)
-       self.assertEqual(dammit.unicode_markup, u'\xe9')
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')
+       self.assertEqual(dammit.unicode_markup, u'Sacr\xe9 bleu! \N{SNOWMAN}')

    def test_convert_hebrew(self):
        hebrew = b"\xed\xe5\xec\xf9"
@@ -299,6 +346,26 @@ class TestUnicodeDammit(unittest.TestCase):
        dammit = UnicodeDammit(utf8_data, [bad_encoding])
        self.assertEqual(dammit.original_encoding.lower(), 'utf-8')

+   def test_exclude_encodings(self):
+       # This is UTF-8.
+       utf8_data = u"Räksmörgås".encode("utf-8")

+       # But if we exclude UTF-8 from consideration, the guess is
+       # Windows-1252.
+       dammit = UnicodeDammit(utf8_data, exclude_encodings=["utf-8"])
+       self.assertEqual(dammit.original_encoding.lower(), 'windows-1252')

+       # And if we exclude that, there is no valid guess at all.
+       dammit = UnicodeDammit(
+           utf8_data, exclude_encodings=["utf-8", "windows-1252"])
+       self.assertEqual(dammit.original_encoding, None)

+   def test_encoding_detector_replaces_junk_in_encoding_name_with_replacement_character(self):
+       detected = EncodingDetector(
+           b'<?xml version="1.0" encoding="UTF-\xdb" ?>')
+       encodings = list(detected.encodings)
+       assert u'utf-\N{REPLACEMENT CHARACTER}' in encodings
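EncodingDetector, newly imported above, exposes the candidate encodings it would try; a small sketch of poking at it directly (our own input bytes):

    from bs4.dammit import EncodingDetector

    detector = EncodingDetector(b'<?xml version="1.0" encoding="utf-8"?><root/>')
    # .encodings yields candidates in the order detection would try them.
    print(list(detector.encodings))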
    def test_detect_html5_style_meta_tag(self):

        for data in (

@@ -9,6 +9,7 @@ same markup, but all Beautiful Soup trees can be traversed with the
methods tested here.
"""

+from pdb import set_trace
import copy
import pickle
import re

@@ -19,8 +20,10 @@ from bs4.builder import (
    HTMLParserTreeBuilder,
)
from bs4.element import (
+   PY3K,
    CData,
    Comment,
    Declaration,
    Doctype,
    NavigableString,
    SoupStrainer,
@@ -68,7 +71,13 @@ class TestFind(TreeTest):

    def test_unicode_text_find(self):
        soup = self.soup(u'<h1>Räksmörgås</h1>')
-       self.assertEqual(soup.find(text=u'Räksmörgås'), u'Räksmörgås')
+       self.assertEqual(soup.find(string=u'Räksmörgås'), u'Räksmörgås')

+   def test_unicode_attribute_find(self):
+       soup = self.soup(u'<h1 id="Räksmörgås">here it is</h1>')
+       str(soup)
+       self.assertEqual("here it is", soup.find(id=u'Räksmörgås').text)

    def test_find_everything(self):
        """Test an optimization that finds all tags."""

@@ -87,6 +96,7 @@ class TestFindAll(TreeTest):
        """You can search the tree for text nodes."""
        soup = self.soup("<html>Foo<b>bar</b>\xbb</html>")
        # Exact match.
+       self.assertEqual(soup.find_all(string="bar"), [u"bar"])
        self.assertEqual(soup.find_all(text="bar"), [u"bar"])
        # Match any of a number of strings.
        self.assertEqual(

@@ -212,6 +222,17 @@ class TestFindAllByName(TreeTest):
        self.assertSelects(
            tree.find_all(id_matches_name), ["Match 1.", "Match 2."])

+   def test_find_with_multi_valued_attribute(self):
+       soup = self.soup(
+           "<div class='a b'>1</div><div class='a c'>2</div><div class='a d'>3</div>"
+       )
+       r1 = soup.find('div', 'a d');
+       r2 = soup.find('div', re.compile(r'a d'));
+       r3, r4 = soup.find_all('div', ['a b', 'a d']);
+       self.assertEqual('3', r1.string)
+       self.assertEqual('3', r2.string)
+       self.assertEqual('1', r3.string)
+       self.assertEqual('3', r4.string)
|
||||
|
||||
|
@ -284,10 +305,10 @@ class TestFindAllByAttribute(TreeTest):
|
|||
f = tree.find_all("gar", class_=re.compile("a"))
|
||||
self.assertSelects(f, ["Found it"])
|
||||
|
||||
# Since the class is not the string "foo bar", but the two
|
||||
# strings "foo" and "bar", this will not find anything.
|
||||
# If the search fails to match the individual strings "foo" and "bar",
|
||||
# it will be tried against the combined string "foo bar".
|
||||
f = tree.find_all("gar", class_=re.compile("o b"))
|
||||
self.assertSelects(f, [])
|
||||
self.assertSelects(f, ["Found it"])
|
||||
|
||||
def test_find_all_with_non_dictionary_for_attrs_finds_by_class(self):
|
||||
soup = self.soup("<a class='bar'>Found it</a>")
|
||||
|
@ -325,7 +346,7 @@ class TestFindAllByAttribute(TreeTest):
|
|||
strainer = SoupStrainer(attrs={'id' : 'first'})
|
||||
self.assertSelects(tree.find_all(strainer), ['Match.'])
|
||||
|
||||
def test_find_all_with_missing_atribute(self):
|
||||
def test_find_all_with_missing_attribute(self):
|
||||
# You can pass in None as the value of an attribute to find_all.
|
||||
# This will match tags that do not have that attribute set.
|
||||
tree = self.soup("""<a id="1">ID present.</a>
|
||||
|
@@ -688,7 +709,7 @@ class TestTagCreation(SoupTest):

    def test_tag_inherits_self_closing_rules_from_builder(self):
        if XML_BUILDER_PRESENT:
-           xml_soup = BeautifulSoup("", "xml")
+           xml_soup = BeautifulSoup("", "lxml-xml")
            xml_br = xml_soup.new_tag("br")
            xml_p = xml_soup.new_tag("p")

@@ -697,7 +718,7 @@ class TestTagCreation(SoupTest):
            self.assertEqual(b"<br/>", xml_br.encode())
            self.assertEqual(b"<p/>", xml_p.encode())

-       html_soup = BeautifulSoup("", "html")
+       html_soup = BeautifulSoup("", "html.parser")
        html_br = html_soup.new_tag("br")
        html_p = html_soup.new_tag("p")
@@ -773,6 +794,14 @@ class TestTreeModification(SoupTest):
        new_a = a.unwrap()
        self.assertEqual(a, new_a)

+   def test_replace_with_and_unwrap_give_useful_exception_when_tag_has_no_parent(self):
+       soup = self.soup("<a><b>Foo</b></a><c>Bar</c>")
+       a = soup.a
+       a.extract()
+       self.assertEqual(None, a.parent)
+       self.assertRaises(ValueError, a.unwrap)
+       self.assertRaises(ValueError, a.replace_with, soup.c)

    def test_replace_tag_with_itself(self):
        text = "<a><b></b><c>Foo<d></d></c></a><a><e></e></a>"
        soup = self.soup(text)

@@ -1067,6 +1096,31 @@ class TestTreeModification(SoupTest):
        self.assertEqual(foo_2, soup.a.string)
        self.assertEqual(bar_2, soup.b.string)

+   def test_extract_multiples_of_same_tag(self):
+       soup = self.soup("""
+<html>
+<head>
+<script>foo</script>
+</head>
+<body>
+<script>bar</script>
+<a></a>
+</body>
+<script>baz</script>
+</html>""")
+       [soup.script.extract() for i in soup.find_all("script")]
+       self.assertEqual("<body>\n\n<a></a>\n</body>", unicode(soup.body))

+   def test_extract_works_when_element_is_surrounded_by_identical_strings(self):
+       soup = self.soup(
+           '<html>\n'
+           '<body>hi</body>\n'
+           '</html>')
+       soup.find('body').extract()
+       self.assertEqual(None, soup.find('body'))

    def test_clear(self):
        """Tag.clear()"""
        soup = self.soup("<p><a>String <em>Italicized</em></a> and another</p>")
@@ -1285,6 +1339,13 @@ class TestPersistence(SoupTest):
        copied = copy.deepcopy(self.tree)
        self.assertEqual(copied.decode(), self.tree.decode())

+   def test_copy_preserves_encoding(self):
+       soup = BeautifulSoup(b'<p> </p>', 'html.parser')
+       encoding = soup.original_encoding
+       copy = soup.__copy__()
+       self.assertEqual(u"<p> </p>", unicode(copy))
+       self.assertEqual(encoding, copy.original_encoding)

    def test_unicode_pickle(self):
        # A tree containing Unicode characters can be pickled.
        html = u"<b>\N{SNOWMAN}</b>"

@@ -1293,6 +1354,51 @@ class TestPersistence(SoupTest):
        loaded = pickle.loads(dumped)
        self.assertEqual(loaded.decode(), soup.decode())

+   def test_copy_navigablestring_is_not_attached_to_tree(self):
+       html = u"<b>Foo<a></a></b><b>Bar</b>"
+       soup = self.soup(html)
+       s1 = soup.find(string="Foo")
+       s2 = copy.copy(s1)
+       self.assertEqual(s1, s2)
+       self.assertEqual(None, s2.parent)
+       self.assertEqual(None, s2.next_element)
+       self.assertNotEqual(None, s1.next_sibling)
+       self.assertEqual(None, s2.next_sibling)
+       self.assertEqual(None, s2.previous_element)

+   def test_copy_navigablestring_subclass_has_same_type(self):
+       html = u"<b><!--Foo--></b>"
+       soup = self.soup(html)
+       s1 = soup.string
+       s2 = copy.copy(s1)
+       self.assertEqual(s1, s2)
+       self.assertTrue(isinstance(s2, Comment))

+   def test_copy_entire_soup(self):
+       html = u"<div><b>Foo<a></a></b><b>Bar</b></div>end"
+       soup = self.soup(html)
+       soup_copy = copy.copy(soup)
+       self.assertEqual(soup, soup_copy)

+   def test_copy_tag_copies_contents(self):
+       html = u"<div><b>Foo<a></a></b><b>Bar</b></div>end"
+       soup = self.soup(html)
+       div = soup.div
+       div_copy = copy.copy(div)
+
+       # The two tags look the same, and evaluate to equal.
+       self.assertEqual(unicode(div), unicode(div_copy))
+       self.assertEqual(div, div_copy)
+
+       # But they're not the same object.
+       self.assertFalse(div is div_copy)
+
+       # And they don't have the same relation to the parse tree. The
+       # copy is not associated with a parse tree at all.
+       self.assertEqual(None, div_copy.parent)
+       self.assertEqual(None, div_copy.previous_element)
+       self.assertEqual(None, div_copy.find(string='Bar').next_element)
+       self.assertNotEqual(None, div.find(string='Bar').next_element)
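The behaviour pinned down above is that copy.copy gives a value-equal but detached clone; a compact sketch (our own markup):

    import copy
    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<div><b>Foo</b></div>", "html.parser")
    div_copy = copy.copy(soup.div)
    assert div_copy == soup.div       # equal by value
    assert div_copy is not soup.div   # but a different object
    assert div_copy.parent is None    # detached from the original tree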
class TestSubstitutions(SoupTest):

@@ -1366,7 +1472,7 @@ class TestSubstitutions(SoupTest):
            console.log("< < hey > > ");
        </script>
        """
-       encoded = BeautifulSoup(doc).encode()
+       encoded = BeautifulSoup(doc, 'html.parser').encode()
        self.assertTrue(b"< < hey > >" in encoded)

    def test_formatter_skips_style_tag_for_html_documents(self):

@@ -1375,7 +1481,7 @@ class TestSubstitutions(SoupTest):
            console.log("< < hey > > ");
        </style>
        """
-       encoded = BeautifulSoup(doc).encode()
+       encoded = BeautifulSoup(doc, 'html.parser').encode()
        self.assertTrue(b"< < hey > >" in encoded)

    def test_prettify_leaves_preformatted_text_alone(self):

@@ -1387,7 +1493,7 @@ class TestSubstitutions(SoupTest):
            soup.div.prettify())

    def test_prettify_accepts_formatter(self):
-       soup = BeautifulSoup("<html><body>foo</body></html>")
+       soup = BeautifulSoup("<html><body>foo</body></html>", 'html.parser')
        pretty = soup.prettify(formatter = lambda x: x.upper())
        self.assertTrue("FOO" in pretty)

@@ -1484,6 +1590,14 @@ class TestEncoding(SoupTest):
        self.assertEqual(
            u"\N{SNOWMAN}".encode("utf8"), soup.b.renderContents())

+   def test_repr(self):
+       html = u"<b>\N{SNOWMAN}</b>"
+       soup = self.soup(html)
+       if PY3K:
+           self.assertEqual(html, repr(soup))
+       else:
+           self.assertEqual(b'<b>\\u2603</b>', repr(soup))

class TestNavigableStringSubclasses(SoupTest):

    def test_cdata(self):
@@ -1522,6 +1636,9 @@ class TestNavigableStringSubclasses(SoupTest):
        soup.insert(1, doctype)
        self.assertEqual(soup.encode(), b"<!DOCTYPE foo>\n")

+   def test_declaration(self):
+       d = Declaration("foo")
+       self.assertEqual("<?foo?>", d.output_ready())

class TestSoupSelector(TreeTest):

@@ -1534,7 +1651,7 @@ class TestSoupSelector(TreeTest):
<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
</head>
<body>
<custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag>
<div id="main" class="fancy">
<div id="inner">
<h1 id="header1">An H1</h1>

@@ -1552,8 +1669,18 @@ class TestSoupSelector(TreeTest):
<a href="#" id="s2a1">span2a1</a>
</span>
<span class="span3"></span>
+<custom-dashed-tag class="dashed" id="dash2"/>
+<div data-tag="dashedvalue" id="data1"/>
</span>
</div>
+<x id="xid">
+<z id="zida"/>
+<z id="zidab"/>
+<z id="zidac"/>
+</x>
+<y id="yid">
+<z id="zidb"/>
+</y>
<p lang="en" id="lang-en">English</p>
<p lang="en-gb" id="lang-en-gb">English UK</p>
<p lang="en-us" id="lang-en-us">English US</p>

@@ -1565,10 +1692,10 @@ class TestSoupSelector(TreeTest):
"""

    def setUp(self):
-       self.soup = BeautifulSoup(self.HTML)
+       self.soup = BeautifulSoup(self.HTML, 'html.parser')

-   def assertSelects(self, selector, expected_ids):
-       el_ids = [el['id'] for el in self.soup.select(selector)]
+   def assertSelects(self, selector, expected_ids, **kwargs):
+       el_ids = [el['id'] for el in self.soup.select(selector, **kwargs)]
        el_ids.sort()
        expected_ids.sort()
        self.assertEqual(expected_ids, el_ids,

@@ -1591,17 +1718,32 @@ class TestSoupSelector(TreeTest):

    def test_one_tag_many(self):
        els = self.soup.select('div')
-       self.assertEqual(len(els), 3)
+       self.assertEqual(len(els), 4)
        for div in els:
            self.assertEqual(div.name, 'div')

+       el = self.soup.select_one('div')
+       self.assertEqual('main', el['id'])

+   def test_select_one_returns_none_if_no_match(self):
+       match = self.soup.select_one('nonexistenttag')
+       self.assertEqual(None, match)
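select_one, exercised above, returns the first match or None instead of a list; a quick sketch (our own markup):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<div id="a"></div><div id="b"></div>', "html.parser")
    print(soup.select_one("div")["id"])  # a - only the first match is returned
    print(soup.select_one("span"))       # None - no exception on zero matches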
    def test_tag_in_tag_one(self):
        els = self.soup.select('div div')
-       self.assertSelects('div div', ['inner'])
+       self.assertSelects('div div', ['inner', 'data1'])

    def test_tag_in_tag_many(self):
        for selector in ('html div', 'html body div', 'body div'):
-           self.assertSelects(selector, ['main', 'inner', 'footer'])
+           self.assertSelects(selector, ['data1', 'main', 'inner', 'footer'])

+   def test_limit(self):
+       self.assertSelects('html div', ['main'], limit=1)
+       self.assertSelects('html body div', ['inner', 'main'], limit=2)
+       self.assertSelects('body div', ['data1', 'main', 'inner', 'footer'],
+           limit=10)
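The limit keyword exercised above caps how many elements select returns, mirroring find_all; a quick sketch (our own markup):

    from bs4 import BeautifulSoup

    soup = BeautifulSoup("<p>1</p><p>2</p><p>3</p>", "html.parser")
    print(len(soup.select("p", limit=2)))  # 2 - matching stops early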
    def test_tag_no_match(self):
        self.assertEqual(len(self.soup.select('del')), 0)

@@ -1609,6 +1751,20 @@ class TestSoupSelector(TreeTest):
    def test_invalid_tag(self):
        self.assertRaises(ValueError, self.soup.select, 'tag%t')

+   def test_select_dashed_tag_ids(self):
+       self.assertSelects('custom-dashed-tag', ['dash1', 'dash2'])

+   def test_select_dashed_by_id(self):
+       dashed = self.soup.select('custom-dashed-tag[id=\"dash2\"]')
+       self.assertEqual(dashed[0].name, 'custom-dashed-tag')
+       self.assertEqual(dashed[0]['id'], 'dash2')

+   def test_dashed_tag_text(self):
+       self.assertEqual(self.soup.select('body > custom-dashed-tag')[0].text, u'Hello there.')

+   def test_select_dashed_matches_find_all(self):
+       self.assertEqual(self.soup.select('custom-dashed-tag'), self.soup.find_all('custom-dashed-tag'))

    def test_header_tags(self):
        self.assertSelectMultiple(
            ('h1', ['header1']),

@@ -1709,6 +1865,7 @@ class TestSoupSelector(TreeTest):
            ('[id^="m"]', ['me', 'main']),
            ('div[id^="m"]', ['main']),
            ('a[id^="m"]', ['me']),
+           ('div[data-tag^="dashed"]', ['data1'])
        )

    def test_attribute_endswith(self):

@@ -1716,8 +1873,8 @@ class TestSoupSelector(TreeTest):
            ('[href$=".css"]', ['l1']),
            ('link[href$=".css"]', ['l1']),
            ('link[id$="1"]', ['l1']),
-           ('[id$="1"]', ['l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1']),
-           ('div[id$="1"]', []),
+           ('[id$="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1', 'dash1']),
+           ('div[id$="1"]', ['data1']),
            ('[id$="noending"]', []),
        )

@@ -1730,7 +1887,6 @@ class TestSoupSelector(TreeTest):
            ('[rel*="notstyle"]', []),
            ('link[rel*="notstyle"]', []),
            ('link[href*="bla"]', ['l1']),
-           ('a[href*="http://"]', ['bob', 'me']),
            ('[href*="http://"]', ['bob', 'me']),
            ('[id*="p"]', ['pmulti', 'p1']),
            ('div[id*="m"]', ['main']),

@@ -1739,8 +1895,8 @@ class TestSoupSelector(TreeTest):
            ('[href*=".css"]', ['l1']),
            ('link[href*=".css"]', ['l1']),
            ('link[id*="1"]', ['l1']),
-           ('[id*="1"]', ['l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1']),
-           ('div[id*="1"]', []),
+           ('[id*="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1', 'dash1']),
+           ('div[id*="1"]', ['data1']),
            ('[id*="noending"]', []),
            # New for this test
            ('[href*="."]', ['bob', 'me', 'l1']),

@@ -1748,6 +1904,7 @@ class TestSoupSelector(TreeTest):
            ('link[href*="."]', ['l1']),
            ('div[id*="n"]', ['main', 'inner']),
            ('div[id*="nn"]', ['inner']),
+           ('div[data-tag*="edval"]', ['data1'])
        )

    def test_attribute_exact_or_hypen(self):

@@ -1767,8 +1924,25 @@ class TestSoupSelector(TreeTest):
            ('p[class]', ['p1', 'pmulti']),
            ('[blah]', []),
            ('p[blah]', []),
+           ('div[data-tag]', ['data1'])
        )

+   def test_quoted_space_in_selector_name(self):
+       html = """<div style="display: wrong">nope</div>
+<div style="display: right">yes</div>
+"""
+       soup = BeautifulSoup(html, 'html.parser')
+       [chosen] = soup.select('div[style="display: right"]')
+       self.assertEqual("yes", chosen.string)

    def test_unsupported_pseudoclass(self):
        self.assertRaises(
            NotImplementedError, self.soup.select, "a:no-such-pseudoclass")

+       self.assertRaises(
+           NotImplementedError, self.soup.select, "a:nth-of-type(a)")

    def test_nth_of_type(self):
        # Try to select first paragraph
        els = self.soup.select('div#inner p:nth-of-type(1)')

@@ -1803,7 +1977,7 @@ class TestSoupSelector(TreeTest):
        selected = inner.select("div")
        # The <div id="inner"> tag was selected. The <div id="footer">
        # tag was not.
-       self.assertSelectsIDs(selected, ['inner'])
+       self.assertSelectsIDs(selected, ['inner', 'data1'])

    def test_overspecified_child_id(self):
        self.assertSelects(".fancy #inner", ['inner'])

@@ -1827,3 +2001,44 @@ class TestSoupSelector(TreeTest):

    def test_sibling_combinator_wont_select_same_tag_twice(self):
        self.assertSelects('p[lang] ~ p', ['lang-en-gb', 'lang-en-us', 'lang-fr'])

+   # Test the selector grouping operator (the comma)
+   def test_multiple_select(self):
+       self.assertSelects('x, y', ['xid', 'yid'])

+   def test_multiple_select_with_no_space(self):
+       self.assertSelects('x,y', ['xid', 'yid'])

+   def test_multiple_select_with_more_space(self):
+       self.assertSelects('x,    y', ['xid', 'yid'])

+   def test_multiple_select_duplicated(self):
+       self.assertSelects('x, x', ['xid'])

+   def test_multiple_select_sibling(self):
+       self.assertSelects('x, y ~ p[lang=fr]', ['xid', 'lang-fr'])

+   def test_multiple_select_tag_and_direct_descendant(self):
+       self.assertSelects('x, y > z', ['xid', 'zidb'])

+   def test_multiple_select_direct_descendant_and_tags(self):
+       self.assertSelects('div > x, y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])

+   def test_multiple_select_indirect_descendant(self):
+       self.assertSelects('div x,y, z', ['xid', 'yid', 'zida', 'zidb', 'zidab', 'zidac'])

+   def test_invalid_multiple_select(self):
+       self.assertRaises(ValueError, self.soup.select, ',x, y')
+       self.assertRaises(ValueError, self.soup.select, 'x,,y')

+   def test_multiple_select_attrs(self):
+       self.assertSelects('p[lang=en], p[lang=en-gb]', ['lang-en', 'lang-en-gb'])

+   def test_multiple_select_ids(self):
+       self.assertSelects('x, y > z[id=zida], z[id=zidab], z[id=zidb]', ['xid', 'zidb', 'zidab'])

+   def test_multiple_select_nested(self):
+       self.assertSelects('body > div > x, y > z', ['xid', 'zidb'])