Update vendored beets to 1.6.0

Updates colorama to 0.4.6
Adds confuse 1.7.0
Updates jellyfish to 0.9.0
Adds mediafile 0.10.1
Updates munkres to 1.1.4
Updates musicbrainzngs to 0.7.1
Updates mutagen to 1.46.0
Updates pyyaml to 6.0
Updates unidecode to 1.3.6
Labrys of Knossos 2022-11-28 18:02:40 -05:00
commit 56c6773c6b
385 changed files with 25143 additions and 18080 deletions

Diff of the vendored beets lyrics plugin (one of the 385 changed files):

@@ -1,4 +1,3 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
@@ -16,7 +15,6 @@
"""Fetches, embeds, and displays lyrics.
"""
from __future__ import absolute_import, division, print_function
import difflib
import errno
@@ -29,11 +27,11 @@ import requests
import unicodedata
from unidecode import unidecode
import warnings
import six
from six.moves import urllib
import urllib
try:
from bs4 import SoupStrainer, BeautifulSoup
import bs4
from bs4 import SoupStrainer
HAS_BEAUTIFUL_SOUP = True
except ImportError:
HAS_BEAUTIFUL_SOUP = False
@@ -48,7 +46,7 @@ try:
# PY3: HTMLParseError was removed in 3.5 as strict mode
# was deprecated in 3.3.
# https://docs.python.org/3.3/library/html.parser.html
from six.moves.html_parser import HTMLParseError
from html.parser import HTMLParseError
except ImportError:
class HTMLParseError(Exception):
pass
@@ -62,23 +60,23 @@ COMMENT_RE = re.compile(r'<!--.*-->', re.S)
TAG_RE = re.compile(r'<[^>]*>')
BREAK_RE = re.compile(r'\n?\s*<br([\s|/][^>]*)*>\s*\n?', re.I)
URL_CHARACTERS = {
u'\u2018': u"'",
u'\u2019': u"'",
u'\u201c': u'"',
u'\u201d': u'"',
u'\u2010': u'-',
u'\u2011': u'-',
u'\u2012': u'-',
u'\u2013': u'-',
u'\u2014': u'-',
u'\u2015': u'-',
u'\u2016': u'-',
u'\u2026': u'...',
'\u2018': "'",
'\u2019': "'",
'\u201c': '"',
'\u201d': '"',
'\u2010': '-',
'\u2011': '-',
'\u2012': '-',
'\u2013': '-',
'\u2014': '-',
'\u2015': '-',
'\u2016': '-',
'\u2026': '...',
}
USER_AGENT = 'beets/{}'.format(beets.__version__)
USER_AGENT = f'beets/{beets.__version__}'
# The content for the base index.rst generated in ReST mode.
REST_INDEX_TEMPLATE = u'''Lyrics
REST_INDEX_TEMPLATE = '''Lyrics
======
* :ref:`Song index <genindex>`
@@ -94,11 +92,11 @@ Artist index:
'''
# The content for the base conf.py generated.
REST_CONF_TEMPLATE = u'''# -*- coding: utf-8 -*-
REST_CONF_TEMPLATE = '''# -*- coding: utf-8 -*-
master_doc = 'index'
project = u'Lyrics'
copyright = u'none'
author = u'Various Authors'
project = 'Lyrics'
copyright = 'none'
author = 'Various Authors'
latex_documents = [
(master_doc, 'Lyrics.tex', project,
author, 'manual'),
@@ -117,7 +115,7 @@ epub_tocdup = False
def unichar(i):
try:
return six.unichr(i)
return chr(i)
except ValueError:
return struct.pack('i', i).decode('utf-32')
@@ -126,12 +124,12 @@ def unescape(text):
"""Resolve &#xxx; HTML entities (and some others)."""
if isinstance(text, bytes):
text = text.decode('utf-8', 'ignore')
out = text.replace(u'&nbsp;', u' ')
out = text.replace('&nbsp;', ' ')
def replchar(m):
num = m.group(1)
return unichar(int(num))
out = re.sub(u"&#(\d+);", replchar, out)
out = re.sub("&#(\\d+);", replchar, out)
return out
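
As a quick illustration of the helper above, a minimal usage sketch (the sample strings are invented, and the snippet assumes the unescape() defined in this hunk):

    # '&#233;' is the decimal character reference for 'e' with an acute accent
    assert unescape('Caf&#233; del Mar') == 'Café del Mar'
    # '&nbsp;' is replaced with a plain space before entity substitution
    assert unescape('A&nbsp;B') == 'A B'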
@@ -140,43 +138,10 @@ def extract_text_between(html, start_marker, end_marker):
_, html = html.split(start_marker, 1)
html, _ = html.split(end_marker, 1)
except ValueError:
return u''
return ''
return html
def extract_text_in(html, starttag):
"""Extract the text from a <DIV> tag in the HTML starting with
``starttag``. Returns None if parsing fails.
"""
# Strip off the leading text before opening tag.
try:
_, html = html.split(starttag, 1)
except ValueError:
return
# Walk through balanced DIV tags.
level = 0
parts = []
pos = 0
for match in DIV_RE.finditer(html):
if match.group(1): # Closing tag.
level -= 1
if level == 0:
pos = match.end()
else: # Opening tag.
if level == 0:
parts.append(html[pos:match.start()])
level += 1
if level == -1:
parts.append(html[pos:match.start()])
break
else:
print(u'no closing tag found!')
return
return u''.join(parts)
def search_pairs(item):
"""Yield a pairs of artists and titles to search for.
@@ -186,6 +151,9 @@ def search_pairs(item):
In addition to the artist and title obtained from the `item` the
method tries to strip extra information like parenthesized suffixes
and featured artists from the strings and add them as candidates.
The artist sort name is added as a fallback candidate to help in
cases where the artist name includes special characters or is in a
non-Latin script.
The method also tries to split multiple titles separated with `/`.
"""
def generate_alternatives(string, patterns):
@@ -199,19 +167,23 @@ def search_pairs(item):
alternatives.append(match.group(1))
return alternatives
title, artist = item.title, item.artist
title, artist, artist_sort = item.title, item.artist, item.artist_sort
patterns = [
# Remove any featuring artists from the artists name
r"(.*?) {0}".format(plugins.feat_tokens())]
fr"(.*?) {plugins.feat_tokens()}"]
artists = generate_alternatives(artist, patterns)
# Use the artist_sort as fallback only if it differs from artist to avoid
# repeated remote requests with the same search terms
if artist != artist_sort:
artists.append(artist_sort)
patterns = [
# Remove a parenthesized suffix from a title string. Common
# examples include (live), (remix), and (acoustic).
r"(.+?)\s+[(].*[)]$",
# Remove any featuring artists from the title
r"(.*?) {0}".format(plugins.feat_tokens(for_artist=False)),
r"(.*?) {}".format(plugins.feat_tokens(for_artist=False)),
# Remove part of title after colon ':' for songs with subtitles
r"(.+?)\s*:.*"]
titles = generate_alternatives(title, patterns)
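
To make the candidate generation concrete, a hedged trace for an invented item, following only the code shown in this hunk:

    # hypothetical item: artist 'Sigur Rós', artist_sort 'Sigur Ros',
    # title 'Hoppípolla (Live)'
    # artists -> ['Sigur Rós', 'Sigur Ros']
    #   (the sort name differs from the artist, so it is appended as a fallback)
    # titles  -> ['Hoppípolla (Live)', 'Hoppípolla']
    #   (the '(Live)' suffix is stripped by r"(.+?)\s+[(].*[)]$")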
@@ -245,14 +217,27 @@ def slug(text):
return re.sub(r'\W+', '-', unidecode(text).lower().strip()).strip('-')
class Backend(object):
if HAS_BEAUTIFUL_SOUP:
def try_parse_html(html, **kwargs):
try:
return bs4.BeautifulSoup(html, 'html.parser', **kwargs)
except HTMLParseError:
return None
else:
def try_parse_html(html, **kwargs):
return None
class Backend:
REQUIRES_BS = False
def __init__(self, config, log):
self._log = log
@staticmethod
def _encode(s):
"""Encode the string for inclusion in a URL"""
if isinstance(s, six.text_type):
if isinstance(s, str):
for char, repl in URL_CHARACTERS.items():
s = s.replace(char, repl)
s = s.encode('utf-8', 'ignore')
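
The slug() helper at the top of this hunk is what the new Genius artist matching and the ReST writer rely on; a short self-contained sketch with invented inputs:

    import re
    from unidecode import unidecode

    def slug(text):
        # same one-liner as in the hunk above
        return re.sub(r'\W+', '-', unidecode(text).lower().strip()).strip('-')

    assert slug('Beyoncé') == 'beyonce'           # accents folded by unidecode
    assert slug('AC/DC (Live!)') == 'ac-dc-live'  # non-word runs collapse to '-'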
@@ -277,20 +262,21 @@ class Backend(object):
'User-Agent': USER_AGENT,
})
except requests.RequestException as exc:
self._log.debug(u'lyrics request failed: {0}', exc)
self._log.debug('lyrics request failed: {0}', exc)
return
if r.status_code == requests.codes.ok:
return r.text
else:
self._log.debug(u'failed to fetch: {0} ({1})', url, r.status_code)
self._log.debug('failed to fetch: {0} ({1})', url, r.status_code)
return None
def fetch(self, artist, title):
raise NotImplementedError()
class SymbolsReplaced(Backend):
class MusiXmatch(Backend):
REPLACEMENTS = {
r'\s+': '_',
r'\s+': '-',
'<': 'Less_Than',
'>': 'Greater_Than',
'#': 'Number_',
@@ -298,39 +284,40 @@ class SymbolsReplaced(Backend):
r'[\]\}]': ')',
}
URL_PATTERN = 'https://www.musixmatch.com/lyrics/%s/%s'
@classmethod
def _encode(cls, s):
for old, new in cls.REPLACEMENTS.items():
s = re.sub(old, new, s)
return super(SymbolsReplaced, cls)._encode(s)
class MusiXmatch(SymbolsReplaced):
REPLACEMENTS = dict(SymbolsReplaced.REPLACEMENTS, **{
r'\s+': '-'
})
URL_PATTERN = 'https://www.musixmatch.com/lyrics/%s/%s'
return super()._encode(s)
def fetch(self, artist, title):
url = self.build_url(artist, title)
html = self.fetch_url(url)
if not html:
return
return None
if "We detected that your IP is blocked" in html:
self._log.warning(u'we are blocked at MusixMatch: url %s failed'
self._log.warning('we are blocked at MusixMatch: url %s failed'
% url)
return
html_part = html.split('<p class="mxm-lyrics__content')[-1]
lyrics = extract_text_between(html_part, '>', '</p>')
return None
html_parts = html.split('<p class="mxm-lyrics__content')
# Sometimes lyrics come in 2 or more parts
lyrics_parts = []
for html_part in html_parts:
lyrics_parts.append(extract_text_between(html_part, '>', '</p>'))
lyrics = '\n'.join(lyrics_parts)
lyrics = lyrics.strip(',"').replace('\\n', '\n')
# another odd case: sometimes only that string remains, for
# missing songs. this seems to happen after being blocked
# above, when filling in the CAPTCHA.
if "Instant lyrics for all your music." in lyrics:
return
return None
# sometimes there are non-existent lyrics with some content
if 'Lyrics | Musixmatch' in lyrics:
return None
return lyrics
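
To illustrate why the rewritten scraper joins every mxm-lyrics__content block instead of keeping only the last one, a sketch with invented page markup (reusing the extract_text_between() helper from above):

    html = ('<p class="mxm-lyrics__content">Verse one</p>'
            '<div class="ad"></div>'
            '<p class="mxm-lyrics__content">Verse two</p>')
    parts = [extract_text_between(part, '>', '</p>')
             for part in html.split('<p class="mxm-lyrics__content')]
    # parts == ['', 'Verse one', 'Verse two']; the join keeps both verses
    print('\n'.join(parts).strip())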
@@ -341,87 +328,171 @@ class Genius(Backend):
bigishdata.com/2016/09/27/getting-song-lyrics-from-geniuss-api-scraping/
"""
REQUIRES_BS = True
base_url = "https://api.genius.com"
def __init__(self, config, log):
super(Genius, self).__init__(config, log)
super().__init__(config, log)
self.api_key = config['genius_api_key'].as_str()
self.headers = {
'Authorization': "Bearer %s" % self.api_key,
'User-Agent': USER_AGENT,
}
def lyrics_from_song_api_path(self, song_api_path):
song_url = self.base_url + song_api_path
response = requests.get(song_url, headers=self.headers)
json = response.json()
path = json["response"]["song"]["path"]
# Gotta go regular html scraping... come on Genius.
page_url = "https://genius.com" + path
try:
page = requests.get(page_url)
except requests.RequestException as exc:
self._log.debug(u'Genius page request for {0} failed: {1}',
page_url, exc)
return None
html = BeautifulSoup(page.text, "html.parser")
# Remove script tags that they put in the middle of the lyrics.
[h.extract() for h in html('script')]
# At least Genius is nice and has a tag called 'lyrics'!
# Updated css where the lyrics are based in HTML.
lyrics = html.find("div", class_="lyrics").get_text()
return lyrics
def fetch(self, artist, title):
search_url = self.base_url + "/search"
data = {'q': title}
try:
response = requests.get(search_url, data=data,
headers=self.headers)
except requests.RequestException as exc:
self._log.debug(u'Genius API request failed: {0}', exc)
"""Fetch lyrics from genius.com
Because Genius doesn't allow accessing lyrics via the API,
we first query the API for a URL matching our artist & title,
then attempt to scrape that URL for the lyrics.
"""
json = self._search(artist, title)
if not json:
self._log.debug('Genius API request returned invalid JSON')
return None
try:
json = response.json()
except ValueError:
self._log.debug(u'Genius API request returned invalid JSON')
return None
song_info = None
# find a matching artist in the json
for hit in json["response"]["hits"]:
if hit["result"]["primary_artist"]["name"] == artist:
song_info = hit
break
hit_artist = hit["result"]["primary_artist"]["name"]
if song_info:
song_api_path = song_info["result"]["api_path"]
return self.lyrics_from_song_api_path(song_api_path)
if slug(hit_artist) == slug(artist):
html = self.fetch_url(hit["result"]["url"])
if not html:
return None
return self._scrape_lyrics_from_html(html)
self._log.debug('Genius failed to find a matching artist for \'{0}\'',
artist)
return None
class LyricsWiki(SymbolsReplaced):
"""Fetch lyrics from LyricsWiki."""
def _search(self, artist, title):
"""Searches the genius api for a given artist and title
URL_PATTERN = 'http://lyrics.wikia.com/%s:%s'
https://docs.genius.com/#search-h2
def fetch(self, artist, title):
url = self.build_url(artist, title)
html = self.fetch_url(url)
if not html:
:returns: JSON response
"""
search_url = self.base_url + "/search"
data = {'q': title + " " + artist.lower()}
try:
response = requests.get(
search_url, data=data, headers=self.headers)
except requests.RequestException as exc:
self._log.debug('Genius API request failed: {0}', exc)
return None
try:
return response.json()
except ValueError:
return None
def _scrape_lyrics_from_html(self, html):
"""Scrape lyrics from a given genius.com html"""
soup = try_parse_html(html)
if not soup:
return
# Get the HTML fragment inside the appropriate HTML element and then
# extract the text from it.
html_frag = extract_text_in(html, u"<div class='lyricbox'>")
if html_frag:
lyrics = _scrape_strip_cruft(html_frag, True)
# Remove script tags that they put in the middle of the lyrics.
[h.extract() for h in soup('script')]
if lyrics and 'Unfortunately, we are not licensed' not in lyrics:
return lyrics
# Most of the time, the page contains a div with class="lyrics" where
# all of the lyrics can be found already correctly formatted
# Sometimes, though, it packages the lyrics into separate divs, most
# likely for easier ad placement
lyrics_div = soup.find("div", class_="lyrics")
if not lyrics_div:
self._log.debug('Received unusual song page html')
verse_div = soup.find("div",
class_=re.compile("Lyrics__Container"))
if not verse_div:
if soup.find("div",
class_=re.compile("LyricsPlaceholder__Message"),
string="This song is an instrumental"):
self._log.debug('Detected instrumental')
return "[Instrumental]"
else:
self._log.debug("Couldn't scrape page using known layouts")
return None
lyrics_div = verse_div.parent
for br in lyrics_div.find_all("br"):
br.replace_with("\n")
ads = lyrics_div.find_all("div",
class_=re.compile("InreadAd__Container"))
for ad in ads:
ad.replace_with("\n")
return lyrics_div.get_text()
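
A minimal sketch of the new fallback path through _scrape_lyrics_from_html, using invented markup that mimics the hashed Lyrics__Container class names on genius.com:

    import re
    from bs4 import BeautifulSoup

    html = ('<div><div class="Lyrics__Container-sc-1ynbvzw-6">'
            'Line one<br/>Line two'
            '<div class="InreadAd__Container-xyz">ad</div>'
            'Line three</div></div>')
    soup = BeautifulSoup(html, 'html.parser')
    verse_div = soup.find("div", class_=re.compile("Lyrics__Container"))
    lyrics_div = verse_div.parent                 # climb to the wrapping div
    for br in lyrics_div.find_all("br"):
        br.replace_with("\n")                     # <br> becomes a newline
    for ad in lyrics_div.find_all(
            "div", class_=re.compile("InreadAd__Container")):
        ad.replace_with("\n")                     # drop the ad container
    print(lyrics_div.get_text())                  # three lines of lyrics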
class Tekstowo(Backend):
# Fetch lyrics from Tekstowo.pl.
REQUIRES_BS = True
BASE_URL = 'http://www.tekstowo.pl'
URL_PATTERN = BASE_URL + '/wyszukaj.html?search-title=%s&search-artist=%s'
def fetch(self, artist, title):
url = self.build_url(title, artist)
search_results = self.fetch_url(url)
if not search_results:
return None
song_page_url = self.parse_search_results(search_results)
if not song_page_url:
return None
song_page_html = self.fetch_url(song_page_url)
if not song_page_html:
return None
return self.extract_lyrics(song_page_html)
def parse_search_results(self, html):
html = _scrape_strip_cruft(html)
html = _scrape_merge_paragraphs(html)
soup = try_parse_html(html)
if not soup:
return None
content_div = soup.find("div", class_="content")
if not content_div:
return None
card_div = content_div.find("div", class_="card")
if not card_div:
return None
song_rows = card_div.find_all("div", class_="box-przeboje")
if not song_rows:
return None
song_row = song_rows[0]
if not song_row:
return None
link = song_row.find('a')
if not link:
return None
return self.BASE_URL + link.get('href')
def extract_lyrics(self, html):
html = _scrape_strip_cruft(html)
html = _scrape_merge_paragraphs(html)
soup = try_parse_html(html)
if not soup:
return None
lyrics_div = soup.find("div", class_="song-text")
if not lyrics_div:
return None
return lyrics_div.get_text()
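
The new Tekstowo backend is a two-step scrape: pick the first song link out of the search results, then pull the song-text div from that page. A sketch of the search-result navigation against invented markup:

    from bs4 import BeautifulSoup

    html = ('<div class="content"><div class="card">'
            '<div class="box-przeboje">'
            '<a href="/piosenka,artist,title.html">Artist - Title</a>'
            '</div></div></div>')
    soup = BeautifulSoup(html, 'html.parser')
    link = (soup.find("div", class_="content")
                .find("div", class_="card")
                .find_all("div", class_="box-przeboje")[0]
                .find('a'))
    print('http://www.tekstowo.pl' + link.get('href'))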
def remove_credits(text):
@@ -446,7 +517,8 @@ def _scrape_strip_cruft(html, plain_text_out=False):
html = html.replace('\r', '\n') # Normalize EOL.
html = re.sub(r' +', ' ', html) # Whitespaces collapse.
html = BREAK_RE.sub('\n', html) # <br> eats up surrounding '\n'.
html = re.sub(r'<(script).*?</\1>(?s)', '', html) # Strip script tags.
html = re.sub(r'(?s)<(script).*?</\1>', '', html) # Strip script tags.
html = re.sub('\u2005', " ", html) # replace unicode with regular space
if plain_text_out: # Strip remaining HTML tags
html = COMMENT_RE.sub('', html)
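
One subtle fix in this hunk: the (?s) inline flag moved from the tail of the script-stripping pattern to the front. Inline global flags placed anywhere but the start of a pattern have raised a DeprecationWarning since Python 3.6 and are an error from Python 3.11, so the old form breaks on current interpreters:

    import re

    html = 'keep<script>\nvar x = 1;\n</script>keep'
    # (?s) makes '.' match newlines, so multi-line scripts are stripped too
    print(re.sub(r'(?s)<(script).*?</\1>', '', html))   # -> keepkeep
    # the old trailing form r'<(script).*?</\1>(?s)' raises re.error on 3.11+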
@@ -466,12 +538,6 @@ def scrape_lyrics_from_html(html):
"""Scrape lyrics from a URL. If no lyrics can be found, return None
instead.
"""
if not HAS_BEAUTIFUL_SOUP:
return None
if not html:
return None
def is_text_notcode(text):
length = len(text)
return (length > 20 and
@@ -481,10 +547,8 @@ def scrape_lyrics_from_html(html):
html = _scrape_merge_paragraphs(html)
# extract all long text blocks that are not code
try:
soup = BeautifulSoup(html, "html.parser",
parse_only=SoupStrainer(text=is_text_notcode))
except HTMLParseError:
soup = try_parse_html(html, parse_only=SoupStrainer(text=is_text_notcode))
if not soup:
return None
# Get the longest text element (if any).
@@ -498,8 +562,10 @@ def scrape_lyrics_from_html(html):
class Google(Backend):
"""Fetch lyrics from Google search results."""
REQUIRES_BS = True
def __init__(self, config, log):
super(Google, self).__init__(config, log)
super().__init__(config, log)
self.api_key = config['google_API_key'].as_str()
self.engine_id = config['google_engine_ID'].as_str()
@@ -511,7 +577,7 @@ class Google(Backend):
bad_triggers_occ = []
nb_lines = text.count('\n')
if nb_lines <= 1:
self._log.debug(u"Ignoring too short lyrics '{0}'", text)
self._log.debug("Ignoring too short lyrics '{0}'", text)
return False
elif nb_lines < 5:
bad_triggers_occ.append('too_short')
@@ -522,14 +588,14 @@ class Google(Backend):
bad_triggers = ['lyrics', 'copyright', 'property', 'links']
if artist:
bad_triggers_occ += [artist]
bad_triggers += [artist]
for item in bad_triggers:
bad_triggers_occ += [item] * len(re.findall(r'\W%s\W' % item,
text, re.I))
if bad_triggers_occ:
self._log.debug(u'Bad triggers detected: {0}', bad_triggers_occ)
self._log.debug('Bad triggers detected: {0}', bad_triggers_occ)
return len(bad_triggers_occ) < 2
def slugify(self, text):
@@ -537,14 +603,14 @@ class Google(Backend):
"""
text = re.sub(r"[-'_\s]", '_', text)
text = re.sub(r"_+", '_', text).strip('_')
pat = "([^,\(]*)\((.*?)\)" # Remove content within parentheses
text = re.sub(pat, '\g<1>', text).strip()
pat = r"([^,\(]*)\((.*?)\)" # Remove content within parentheses
text = re.sub(pat, r'\g<1>', text).strip()
try:
text = unicodedata.normalize('NFKD', text).encode('ascii',
'ignore')
text = six.text_type(re.sub('[-\s]+', ' ', text.decode('utf-8')))
text = str(re.sub(r'[-\s]+', ' ', text.decode('utf-8')))
except UnicodeDecodeError:
self._log.exception(u"Failing to normalize '{0}'", text)
self._log.exception("Failing to normalize '{0}'", text)
return text
BY_TRANS = ['by', 'par', 'de', 'von']
@@ -556,7 +622,7 @@ class Google(Backend):
"""
title = self.slugify(title.lower())
artist = self.slugify(artist.lower())
sitename = re.search(u"//([^/]+)/.*",
sitename = re.search("//([^/]+)/.*",
self.slugify(url_link.lower())).group(1)
url_title = self.slugify(url_title.lower())
@@ -570,7 +636,7 @@ class Google(Backend):
[artist, sitename, sitename.replace('www.', '')] + \
self.LYRICS_TRANS
tokens = [re.escape(t) for t in tokens]
song_title = re.sub(u'(%s)' % u'|'.join(tokens), u'', url_title)
song_title = re.sub('(%s)' % '|'.join(tokens), '', url_title)
song_title = song_title.strip('_|')
typo_ratio = .9
@@ -578,53 +644,57 @@ class Google(Backend):
return ratio >= typo_ratio
def fetch(self, artist, title):
query = u"%s %s" % (artist, title)
url = u'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \
query = f"{artist} {title}"
url = 'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \
% (self.api_key, self.engine_id,
urllib.parse.quote(query.encode('utf-8')))
data = self.fetch_url(url)
if not data:
self._log.debug(u'google backend returned no data')
self._log.debug('google backend returned no data')
return None
try:
data = json.loads(data)
except ValueError as exc:
self._log.debug(u'google backend returned malformed JSON: {}', exc)
self._log.debug('google backend returned malformed JSON: {}', exc)
if 'error' in data:
reason = data['error']['errors'][0]['reason']
self._log.debug(u'google backend error: {0}', reason)
self._log.debug('google backend error: {0}', reason)
return None
if 'items' in data.keys():
for item in data['items']:
url_link = item['link']
url_title = item.get('title', u'')
url_title = item.get('title', '')
if not self.is_page_candidate(url_link, url_title,
title, artist):
continue
html = self.fetch_url(url_link)
if not html:
continue
lyrics = scrape_lyrics_from_html(html)
if not lyrics:
continue
if self.is_lyrics(lyrics, artist):
self._log.debug(u'got lyrics from {0}',
self._log.debug('got lyrics from {0}',
item['displayLink'])
return lyrics
return None
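
For reference, a sketch of the Custom Search URL the Google backend builds; the API key and engine ID here are invented placeholders:

    import urllib.parse

    api_key, engine_id = 'MY_KEY', 'MY_ENGINE_ID'   # placeholders
    query = 'The Beatles Yesterday'
    url = ('https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s'
           % (api_key, engine_id,
              urllib.parse.quote(query.encode('utf-8'))))
    print(url)   # ...&q=The%20Beatles%20Yesterday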
class LyricsPlugin(plugins.BeetsPlugin):
SOURCES = ['google', 'lyricwiki', 'musixmatch', 'genius']
SOURCES = ['google', 'musixmatch', 'genius', 'tekstowo']
SOURCE_BACKENDS = {
'google': Google,
'lyricwiki': LyricsWiki,
'musixmatch': MusiXmatch,
'genius': Genius,
'tekstowo': Tekstowo,
}
def __init__(self):
super(LyricsPlugin, self).__init__()
super().__init__()
self.import_stages = [self.imported]
self.config.add({
'auto': True,
@@ -632,7 +702,7 @@ class LyricsPlugin(plugins.BeetsPlugin):
'bing_lang_from': [],
'bing_lang_to': None,
'google_API_key': None,
'google_engine_ID': u'009217259823014548361:lndtuqkycfu',
'google_engine_ID': '009217259823014548361:lndtuqkycfu',
'genius_api_key':
"Ryq93pUGm8bM6eUWwD_M3NOFFDAtp2yEE7W"
"76V-uFL5jks5dNvcGCdarqFjDhP9c",
@@ -648,7 +718,7 @@ class LyricsPlugin(plugins.BeetsPlugin):
# State information for the ReST writer.
# First, the current artist we're writing.
self.artist = u'Unknown artist'
self.artist = 'Unknown artist'
# The current album: False means no album yet.
self.album = False
# The current rest file content. None means the file is not
@@ -659,40 +729,44 @@
sources = plugins.sanitize_choices(
self.config['sources'].as_str_seq(), available_sources)
if not HAS_BEAUTIFUL_SOUP:
sources = self.sanitize_bs_sources(sources)
if 'google' in sources:
if not self.config['google_API_key'].get():
# We log a *debug* message here because the default
# configuration includes `google`. This way, the source
# is silent by default but can be enabled just by
# setting an API key.
self._log.debug(u'Disabling google source: '
u'no API key configured.')
self._log.debug('Disabling google source: '
'no API key configured.')
sources.remove('google')
elif not HAS_BEAUTIFUL_SOUP:
self._log.warning(u'To use the google lyrics source, you must '
u'install the beautifulsoup4 module. See '
u'the documentation for further details.')
sources.remove('google')
if 'genius' in sources and not HAS_BEAUTIFUL_SOUP:
self._log.debug(
u'The Genius backend requires BeautifulSoup, which is not '
u'installed, so the source is disabled.'
)
sources.remove('genius')
self.config['bing_lang_from'] = [
x.lower() for x in self.config['bing_lang_from'].as_str_seq()]
self.bing_auth_token = None
if not HAS_LANGDETECT and self.config['bing_client_secret'].get():
self._log.warning(u'To use bing translations, you need to '
u'install the langdetect module. See the '
u'documentation for further details.')
self._log.warning('To use bing translations, you need to '
'install the langdetect module. See the '
'documentation for further details.')
self.backends = [self.SOURCE_BACKENDS[source](self.config, self._log)
for source in sources]
def sanitize_bs_sources(self, sources):
enabled_sources = []
for source in sources:
if self.SOURCE_BACKENDS[source].REQUIRES_BS:
self._log.debug('To use the %s lyrics source, you must '
'install the beautifulsoup4 module. See '
'the documentation for further details.'
% source)
else:
enabled_sources.append(source)
return enabled_sources
def get_bing_access_token(self):
params = {
'client_id': 'beets',
@@ -708,30 +782,30 @@ class LyricsPlugin(plugins.BeetsPlugin):
if 'access_token' in oauth_token:
return "Bearer " + oauth_token['access_token']
else:
self._log.warning(u'Could not get Bing Translate API access token.'
u' Check your "bing_client_secret" password')
self._log.warning('Could not get Bing Translate API access token.'
' Check your "bing_client_secret" password')
def commands(self):
cmd = ui.Subcommand('lyrics', help='fetch song lyrics')
cmd.parser.add_option(
u'-p', u'--print', dest='printlyr',
'-p', '--print', dest='printlyr',
action='store_true', default=False,
help=u'print lyrics to console',
help='print lyrics to console',
)
cmd.parser.add_option(
u'-r', u'--write-rest', dest='writerest',
'-r', '--write-rest', dest='writerest',
action='store', default=None, metavar='dir',
help=u'write lyrics to given directory as ReST files',
help='write lyrics to given directory as ReST files',
)
cmd.parser.add_option(
u'-f', u'--force', dest='force_refetch',
'-f', '--force', dest='force_refetch',
action='store_true', default=False,
help=u'always re-download lyrics',
help='always re-download lyrics',
)
cmd.parser.add_option(
u'-l', u'--local', dest='local_only',
'-l', '--local', dest='local_only',
action='store_true', default=False,
help=u'do not fetch missing lyrics',
help='do not fetch missing lyrics',
)
def func(lib, opts, args):
@@ -740,7 +814,8 @@ class LyricsPlugin(plugins.BeetsPlugin):
write = ui.should_write()
if opts.writerest:
self.writerest_indexes(opts.writerest)
for item in lib.items(ui.decargs(args)):
items = lib.items(ui.decargs(args))
for item in items:
if not opts.local_only and not self.config['local']:
self.fetch_item_lyrics(
lib, item, write,
@@ -750,51 +825,55 @@ class LyricsPlugin(plugins.BeetsPlugin):
if opts.printlyr:
ui.print_(item.lyrics)
if opts.writerest:
self.writerest(opts.writerest, item)
if opts.writerest:
# flush last artist
self.writerest(opts.writerest, None)
ui.print_(u'ReST files generated. to build, use one of:')
ui.print_(u' sphinx-build -b html %s _build/html'
self.appendrest(opts.writerest, item)
if opts.writerest and items:
# flush last artist & write to ReST
self.writerest(opts.writerest)
ui.print_('ReST files generated. to build, use one of:')
ui.print_(' sphinx-build -b html %s _build/html'
% opts.writerest)
ui.print_(u' sphinx-build -b epub %s _build/epub'
ui.print_(' sphinx-build -b epub %s _build/epub'
% opts.writerest)
ui.print_((u' sphinx-build -b latex %s _build/latex '
u'&& make -C _build/latex all-pdf')
ui.print_((' sphinx-build -b latex %s _build/latex '
'&& make -C _build/latex all-pdf')
% opts.writerest)
cmd.func = func
return [cmd]
def writerest(self, directory, item):
"""Write the item to an ReST file
def appendrest(self, directory, item):
"""Append the item to an ReST file
This will keep state (in the `rest` variable) in order to avoid
writing continuously to the same files.
"""
if item is None or slug(self.artist) != slug(item.albumartist):
if self.rest is not None:
path = os.path.join(directory, 'artists',
slug(self.artist) + u'.rst')
with open(path, 'wb') as output:
output.write(self.rest.encode('utf-8'))
self.rest = None
if item is None:
return
if slug(self.artist) != slug(item.albumartist):
# Write current file and start a new one ~ item.albumartist
self.writerest(directory)
self.artist = item.albumartist.strip()
self.rest = u"%s\n%s\n\n.. contents::\n :local:\n\n" \
self.rest = "%s\n%s\n\n.. contents::\n :local:\n\n" \
% (self.artist,
u'=' * len(self.artist))
'=' * len(self.artist))
if self.album != item.album:
tmpalbum = self.album = item.album.strip()
if self.album == '':
tmpalbum = u'Unknown album'
self.rest += u"%s\n%s\n\n" % (tmpalbum, u'-' * len(tmpalbum))
title_str = u":index:`%s`" % item.title.strip()
block = u'| ' + item.lyrics.replace(u'\n', u'\n| ')
self.rest += u"%s\n%s\n\n%s\n\n" % (title_str,
u'~' * len(title_str),
block)
tmpalbum = 'Unknown album'
self.rest += "{}\n{}\n\n".format(tmpalbum, '-' * len(tmpalbum))
title_str = ":index:`%s`" % item.title.strip()
block = '| ' + item.lyrics.replace('\n', '\n| ')
self.rest += "{}\n{}\n\n{}\n\n".format(title_str,
'~' * len(title_str),
block)
def writerest(self, directory):
"""Write self.rest to a ReST file
"""
if self.rest is not None and self.artist is not None:
path = os.path.join(directory, 'artists',
slug(self.artist) + '.rst')
with open(path, 'wb') as output:
output.write(self.rest.encode('utf-8'))
def writerest_indexes(self, directory):
"""Write conf.py and index.rst files necessary for Sphinx
@@ -832,7 +911,7 @@
"""
# Skip if the item already has lyrics.
if not force and item.lyrics:
self._log.info(u'lyrics already present: {0}', item)
self._log.info('lyrics already present: {0}', item)
return
lyrics = None
@@ -841,10 +920,10 @@
if any(lyrics):
break
lyrics = u"\n\n---\n\n".join([l for l in lyrics if l])
lyrics = "\n\n---\n\n".join([l for l in lyrics if l])
if lyrics:
self._log.info(u'fetched lyrics: {0}', item)
self._log.info('fetched lyrics: {0}', item)
if HAS_LANGDETECT and self.config['bing_client_secret'].get():
lang_from = langdetect.detect(lyrics)
if self.config['bing_lang_to'].get() != lang_from and (
@@ -854,7 +933,7 @@
lyrics = self.append_translation(
lyrics, self.config['bing_lang_to'])
else:
self._log.info(u'lyrics not found: {0}', item)
self._log.info('lyrics not found: {0}', item)
fallback = self.config['fallback'].get()
if fallback:
lyrics = fallback
@@ -872,12 +951,12 @@
for backend in self.backends:
lyrics = backend.fetch(artist, title)
if lyrics:
self._log.debug(u'got lyrics from backend: {0}',
self._log.debug('got lyrics from backend: {0}',
backend.__class__.__name__)
return _scrape_strip_cruft(lyrics, True)
def append_translation(self, text, to_lang):
import xml.etree.ElementTree as ET
from xml.etree import ElementTree
if not self.bing_auth_token:
self.bing_auth_token = self.get_bing_access_token()
@@ -895,10 +974,11 @@
self.bing_auth_token = None
return self.append_translation(text, to_lang)
return text
lines_translated = ET.fromstring(r.text.encode('utf-8')).text
lines_translated = ElementTree.fromstring(
r.text.encode('utf-8')).text
# Use a translation mapping dict to build resulting lyrics
translations = dict(zip(text_lines, lines_translated.split('|')))
result = ''
for line in text.split('\n'):
result += '%s / %s\n' % (line, translations[line])
result += '{} / {}\n'.format(line, translations[line])
return result
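
Finally, the Bing translation merge pairs each original line positionally with its '|'-separated translated counterpart; a sketch with invented strings:

    text = 'Hello\nGoodbye'
    text_lines = text.split('\n')
    lines_translated = 'Bonjour|Au revoir'   # invented API response body
    translations = dict(zip(text_lines, lines_translated.split('|')))
    result = ''
    for line in text.split('\n'):
        result += '{} / {}\n'.format(line, translations[line])
    print(result)   # 'Hello / Bonjour' then 'Goodbye / Au revoir'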