Update beets to 1.4.7

Also updates:
- colorama-0.4.1
- jellyfish-0.6.1
- munkres-1.0.12
- musicbrainzngs-0.6
- mutagen-1.41.1
- pyyaml-3.13
- six-1.12.0
- unidecode-1.0.23
This commit is contained in:
Labrys of Knossos 2018-12-15 00:52:11 -05:00
parent 05b0fb498f
commit e854005ae1
193 changed files with 15896 additions and 6384 deletions

BIN
libs/_yaml.cp37-win32.pyd Normal file

Binary file not shown.

View file

@ -19,7 +19,7 @@ import os
from beets.util import confit from beets.util import confit
__version__ = u'1.3.18' __version__ = u'1.4.7'
__author__ = u'Adrian Sampson <adrian@radbox.org>' __author__ = u'Adrian Sampson <adrian@radbox.org>'

26
libs/beets/__main__.py Normal file
View file

@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2017, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The __main__ module lets you run the beets CLI interface by typing
`python -m beets`.
"""
from __future__ import division, absolute_import, print_function
import sys
from .ui import main
if __name__ == "__main__":
main(sys.argv[1:])

View file

@ -22,10 +22,9 @@ from __future__ import division, absolute_import, print_function
import subprocess import subprocess
import platform import platform
from tempfile import NamedTemporaryFile from tempfile import NamedTemporaryFile
import imghdr
import os import os
from beets.util import displayable_path, syspath from beets.util import displayable_path, syspath, bytestring_path
from beets.util.artresizer import ArtResizer from beets.util.artresizer import ArtResizer
from beets import mediafile from beets import mediafile
@ -124,26 +123,49 @@ def check_art_similarity(log, item, imagepath, compare_threshold):
is_windows = platform.system() == "Windows" is_windows = platform.system() == "Windows"
# Converting images to grayscale tends to minimize the weight # Converting images to grayscale tends to minimize the weight
# of colors in the diff score. # of colors in the diff score. So we first convert both images
# to grayscale and then pipe them into the `compare` command.
# On Windows, ImageMagick doesn't support the magic \\?\ prefix
# on paths, so we pass `prefix=False` to `syspath`.
convert_cmd = ['convert', syspath(imagepath, prefix=False),
syspath(art, prefix=False),
'-colorspace', 'gray', 'MIFF:-']
compare_cmd = ['compare', '-metric', 'PHASH', '-', 'null:']
log.debug(u'comparing images with pipeline {} | {}',
convert_cmd, compare_cmd)
convert_proc = subprocess.Popen( convert_proc = subprocess.Popen(
[b'convert', syspath(imagepath), syspath(art), convert_cmd,
b'-colorspace', b'gray', b'MIFF:-'],
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=not is_windows, close_fds=not is_windows,
) )
compare_proc = subprocess.Popen( compare_proc = subprocess.Popen(
[b'compare', b'-metric', b'PHASH', b'-', b'null:'], compare_cmd,
stdin=convert_proc.stdout, stdin=convert_proc.stdout,
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stderr=subprocess.PIPE,
close_fds=not is_windows, close_fds=not is_windows,
) )
convert_proc.stdout.close()
# Check the convert output. We're not interested in the
# standard output; that gets piped to the next stage.
convert_proc.stdout.close()
convert_stderr = convert_proc.stderr.read()
convert_proc.stderr.close()
convert_proc.wait()
if convert_proc.returncode:
log.debug(
u'ImageMagick convert failed with status {}: {!r}',
convert_proc.returncode,
convert_stderr,
)
return
# Check the compare output.
stdout, stderr = compare_proc.communicate() stdout, stderr = compare_proc.communicate()
if compare_proc.returncode: if compare_proc.returncode:
if compare_proc.returncode != 1: if compare_proc.returncode != 1:
log.debug(u'IM phashes compare failed for {0}, {1}', log.debug(u'ImageMagick compare failed: {0}, {1}',
displayable_path(imagepath), displayable_path(imagepath),
displayable_path(art)) displayable_path(art))
return return
@ -157,7 +179,7 @@ def check_art_similarity(log, item, imagepath, compare_threshold):
log.debug(u'IM output is not a number: {0!r}', out_str) log.debug(u'IM output is not a number: {0!r}', out_str)
return return
log.debug(u'compare PHASH score is {0}', phash_diff) log.debug(u'ImageMagick compare score: {0}', phash_diff)
return phash_diff <= compare_threshold return phash_diff <= compare_threshold
return True return True
@ -165,18 +187,18 @@ def check_art_similarity(log, item, imagepath, compare_threshold):
def extract(log, outpath, item): def extract(log, outpath, item):
art = get_art(log, item) art = get_art(log, item)
outpath = bytestring_path(outpath)
if not art: if not art:
log.info(u'No album art present in {0}, skipping.', item) log.info(u'No album art present in {0}, skipping.', item)
return return
# Add an extension to the filename. # Add an extension to the filename.
ext = imghdr.what(None, h=art) ext = mediafile.image_extension(art)
if not ext: if not ext:
log.warning(u'Unknown image type in {0}.', log.warning(u'Unknown image type in {0}.',
displayable_path(item.path)) displayable_path(item.path))
return return
outpath += b'.' + ext outpath += bytestring_path('.' + ext)
log.info(u'Extracting album art from: {0} to: {1}', log.info(u'Extracting album art from: {0} to: {1}',
item, displayable_path(outpath)) item, displayable_path(outpath))

View file

@ -23,7 +23,7 @@ from beets import config
# Parts of external interface. # Parts of external interface.
from .hooks import AlbumInfo, TrackInfo, AlbumMatch, TrackMatch # noqa from .hooks import AlbumInfo, TrackInfo, AlbumMatch, TrackMatch # noqa
from .match import tag_item, tag_album # noqa from .match import tag_item, tag_album, Proposal # noqa
from .match import Recommendation # noqa from .match import Recommendation # noqa
# Global logger. # Global logger.
@ -40,10 +40,21 @@ def apply_item_metadata(item, track_info):
item.artist_credit = track_info.artist_credit item.artist_credit = track_info.artist_credit
item.title = track_info.title item.title = track_info.title
item.mb_trackid = track_info.track_id item.mb_trackid = track_info.track_id
item.mb_releasetrackid = track_info.release_track_id
if track_info.artist_id: if track_info.artist_id:
item.mb_artistid = track_info.artist_id item.mb_artistid = track_info.artist_id
if track_info.data_source: if track_info.data_source:
item.data_source = track_info.data_source item.data_source = track_info.data_source
if track_info.lyricist is not None:
item.lyricist = track_info.lyricist
if track_info.composer is not None:
item.composer = track_info.composer
if track_info.composer_sort is not None:
item.composer_sort = track_info.composer_sort
if track_info.arranger is not None:
item.arranger = track_info.arranger
# At the moment, the other metadata is left intact (including album # At the moment, the other metadata is left intact (including album
# and track number). Perhaps these should be emptied? # and track number). Perhaps these should be emptied?
@ -52,13 +63,20 @@ def apply_metadata(album_info, mapping):
"""Set the items' metadata to match an AlbumInfo object using a """Set the items' metadata to match an AlbumInfo object using a
mapping from Items to TrackInfo objects. mapping from Items to TrackInfo objects.
""" """
for item, track_info in mapping.iteritems(): for item, track_info in mapping.items():
# Album, artist, track count. # Artist or artist credit.
if track_info.artist: if config['artist_credit']:
item.artist = track_info.artist item.artist = (track_info.artist_credit or
track_info.artist or
album_info.artist_credit or
album_info.artist)
item.albumartist = (album_info.artist_credit or
album_info.artist)
else: else:
item.artist = album_info.artist item.artist = (track_info.artist or album_info.artist)
item.albumartist = album_info.artist item.albumartist = album_info.artist
# Album.
item.album = album_info.album item.album = album_info.album
# Artist sort and credit names. # Artist sort and credit names.
@ -97,8 +115,9 @@ def apply_metadata(album_info, mapping):
if config['per_disc_numbering']: if config['per_disc_numbering']:
# We want to let the track number be zero, but if the medium index # We want to let the track number be zero, but if the medium index
# is not provided we need to fall back to the overall index. # is not provided we need to fall back to the overall index.
if track_info.medium_index is not None:
item.track = track_info.medium_index item.track = track_info.medium_index
if item.track is None: else:
item.track = track_info.index item.track = track_info.index
item.tracktotal = track_info.medium_total or len(album_info.tracks) item.tracktotal = track_info.medium_total or len(album_info.tracks)
else: else:
@ -111,6 +130,7 @@ def apply_metadata(album_info, mapping):
# MusicBrainz IDs. # MusicBrainz IDs.
item.mb_trackid = track_info.track_id item.mb_trackid = track_info.track_id
item.mb_releasetrackid = track_info.release_track_id
item.mb_albumid = album_info.album_id item.mb_albumid = album_info.album_id
if track_info.artist_id: if track_info.artist_id:
item.mb_artistid = track_info.artist_id item.mb_artistid = track_info.artist_id
@ -141,3 +161,14 @@ def apply_metadata(album_info, mapping):
if track_info.media is not None: if track_info.media is not None:
item.media = track_info.media item.media = track_info.media
if track_info.lyricist is not None:
item.lyricist = track_info.lyricist
if track_info.composer is not None:
item.composer = track_info.composer
if track_info.composer_sort is not None:
item.composer_sort = track_info.composer_sort
if track_info.arranger is not None:
item.arranger = track_info.arranger
item.track_alt = track_info.track_alt

View file

@ -17,14 +17,17 @@
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
from collections import namedtuple from collections import namedtuple
from functools import total_ordering
import re import re
from beets import logging from beets import logging
from beets import plugins from beets import plugins
from beets import config from beets import config
from beets.util import as_string
from beets.autotag import mb from beets.autotag import mb
from jellyfish import levenshtein_distance from jellyfish import levenshtein_distance
from unidecode import unidecode from unidecode import unidecode
import six
log = logging.getLogger('beets') log = logging.getLogger('beets')
@ -104,7 +107,7 @@ class AlbumInfo(object):
# Work around a bug in python-musicbrainz-ngs that causes some # Work around a bug in python-musicbrainz-ngs that causes some
# strings to be bytes rather than Unicode. # strings to be bytes rather than Unicode.
# https://github.com/alastair/python-musicbrainz-ngs/issues/85 # https://github.com/alastair/python-musicbrainz-ngs/issues/85
def decode(self, codec='utf8'): def decode(self, codec='utf-8'):
"""Ensure that all string attributes on this object, and the """Ensure that all string attributes on this object, and the
constituent `TrackInfo` objects, are decoded to Unicode. constituent `TrackInfo` objects, are decoded to Unicode.
""" """
@ -126,6 +129,8 @@ class TrackInfo(object):
- ``title``: name of the track - ``title``: name of the track
- ``track_id``: MusicBrainz ID; UUID fragment only - ``track_id``: MusicBrainz ID; UUID fragment only
- ``release_track_id``: MusicBrainz ID respective to a track on a
particular release; UUID fragment only
- ``artist``: individual track artist name - ``artist``: individual track artist name
- ``artist_id`` - ``artist_id``
- ``length``: float: duration of the track in seconds - ``length``: float: duration of the track in seconds
@ -139,18 +144,25 @@ class TrackInfo(object):
- ``artist_credit``: Recording-specific artist name - ``artist_credit``: Recording-specific artist name
- ``data_source``: The original data source (MusicBrainz, Discogs, etc.) - ``data_source``: The original data source (MusicBrainz, Discogs, etc.)
- ``data_url``: The data source release URL. - ``data_url``: The data source release URL.
- ``lyricist``: individual track lyricist name
- ``composer``: individual track composer name
- ``composer_sort``: individual track composer sort name
- ``arranger`: individual track arranger name
- ``track_alt``: alternative track number (tape, vinyl, etc.)
Only ``title`` and ``track_id`` are required. The rest of the fields Only ``title`` and ``track_id`` are required. The rest of the fields
may be None. The indices ``index``, ``medium``, and ``medium_index`` may be None. The indices ``index``, ``medium``, and ``medium_index``
are all 1-based. are all 1-based.
""" """
def __init__(self, title, track_id, artist=None, artist_id=None, def __init__(self, title, track_id, release_track_id=None, artist=None,
length=None, index=None, medium=None, medium_index=None, artist_id=None, length=None, index=None, medium=None,
medium_total=None, artist_sort=None, disctitle=None, medium_index=None, medium_total=None, artist_sort=None,
artist_credit=None, data_source=None, data_url=None, disctitle=None, artist_credit=None, data_source=None,
media=None): data_url=None, media=None, lyricist=None, composer=None,
composer_sort=None, arranger=None, track_alt=None):
self.title = title self.title = title
self.track_id = track_id self.track_id = track_id
self.release_track_id = release_track_id
self.artist = artist self.artist = artist
self.artist_id = artist_id self.artist_id = artist_id
self.length = length self.length = length
@ -164,9 +176,14 @@ class TrackInfo(object):
self.artist_credit = artist_credit self.artist_credit = artist_credit
self.data_source = data_source self.data_source = data_source
self.data_url = data_url self.data_url = data_url
self.lyricist = lyricist
self.composer = composer
self.composer_sort = composer_sort
self.arranger = arranger
self.track_alt = track_alt
# As above, work around a bug in python-musicbrainz-ngs. # As above, work around a bug in python-musicbrainz-ngs.
def decode(self, codec='utf8'): def decode(self, codec='utf-8'):
"""Ensure that all string attributes on this object are decoded """Ensure that all string attributes on this object are decoded
to Unicode. to Unicode.
""" """
@ -203,10 +220,10 @@ def _string_dist_basic(str1, str2):
transliteration/lowering to ASCII characters. Normalized by string transliteration/lowering to ASCII characters. Normalized by string
length. length.
""" """
assert isinstance(str1, unicode) assert isinstance(str1, six.text_type)
assert isinstance(str2, unicode) assert isinstance(str2, six.text_type)
str1 = unidecode(str1).decode('ascii') str1 = as_string(unidecode(str1))
str2 = unidecode(str2).decode('ascii') str2 = as_string(unidecode(str2))
str1 = re.sub(r'[^a-z0-9]', '', str1.lower()) str1 = re.sub(r'[^a-z0-9]', '', str1.lower())
str2 = re.sub(r'[^a-z0-9]', '', str2.lower()) str2 = re.sub(r'[^a-z0-9]', '', str2.lower())
if not str1 and not str2: if not str1 and not str2:
@ -288,6 +305,8 @@ class LazyClassProperty(object):
return self.value return self.value
@total_ordering
@six.python_2_unicode_compatible
class Distance(object): class Distance(object):
"""Keeps track of multiple distance penalties. Provides a single """Keeps track of multiple distance penalties. Provides a single
weighted distance for all penalties as well as a weighted distance weighted distance for all penalties as well as a weighted distance
@ -323,7 +342,7 @@ class Distance(object):
"""Return the maximum distance penalty (normalization factor). """Return the maximum distance penalty (normalization factor).
""" """
dist_max = 0.0 dist_max = 0.0
for key, penalty in self._penalties.iteritems(): for key, penalty in self._penalties.items():
dist_max += len(penalty) * self._weights[key] dist_max += len(penalty) * self._weights[key]
return dist_max return dist_max
@ -332,7 +351,7 @@ class Distance(object):
"""Return the raw (denormalized) distance. """Return the raw (denormalized) distance.
""" """
dist_raw = 0.0 dist_raw = 0.0
for key, penalty in self._penalties.iteritems(): for key, penalty in self._penalties.items():
dist_raw += sum(penalty) * self._weights[key] dist_raw += sum(penalty) * self._weights[key]
return dist_raw return dist_raw
@ -354,10 +373,16 @@ class Distance(object):
key=lambda key_and_dist: (-key_and_dist[1], key_and_dist[0]) key=lambda key_and_dist: (-key_and_dist[1], key_and_dist[0])
) )
def __hash__(self):
return id(self)
def __eq__(self, other):
return self.distance == other
# Behave like a float. # Behave like a float.
def __cmp__(self, other): def __lt__(self, other):
return cmp(self.distance, other) return self.distance < other
def __float__(self): def __float__(self):
return self.distance return self.distance
@ -368,7 +393,7 @@ class Distance(object):
def __rsub__(self, other): def __rsub__(self, other):
return other - self.distance return other - self.distance
def __unicode__(self): def __str__(self):
return "{0:.2f}".format(self.distance) return "{0:.2f}".format(self.distance)
# Behave like a dict. # Behave like a dict.
@ -398,7 +423,7 @@ class Distance(object):
raise ValueError( raise ValueError(
u'`dist` must be a Distance object, not {0}'.format(type(dist)) u'`dist` must be a Distance object, not {0}'.format(type(dist))
) )
for key, penalties in dist._penalties.iteritems(): for key, penalties in dist._penalties.items():
self._penalties.setdefault(key, []).extend(penalties) self._penalties.setdefault(key, []).extend(penalties)
# Adding components. # Adding components.
@ -537,24 +562,27 @@ def track_for_mbid(recording_id):
def albums_for_id(album_id): def albums_for_id(album_id):
"""Get a list of albums for an ID.""" """Get a list of albums for an ID."""
candidates = [album_for_mbid(album_id)] a = album_for_mbid(album_id)
plugin_albums = plugins.album_for_id(album_id) if a:
for a in plugin_albums: yield a
for a in plugins.album_for_id(album_id):
if a:
plugins.send(u'albuminfo_received', info=a) plugins.send(u'albuminfo_received', info=a)
candidates.extend(plugin_albums) yield a
return filter(None, candidates)
def tracks_for_id(track_id): def tracks_for_id(track_id):
"""Get a list of tracks for an ID.""" """Get a list of tracks for an ID."""
candidates = [track_for_mbid(track_id)] t = track_for_mbid(track_id)
plugin_tracks = plugins.track_for_id(track_id) if t:
for t in plugin_tracks: yield t
for t in plugins.track_for_id(track_id):
if t:
plugins.send(u'trackinfo_received', info=t) plugins.send(u'trackinfo_received', info=t)
candidates.extend(plugin_tracks) yield t
return filter(None, candidates)
@plugins.notify_info_yielded(u'albuminfo_received')
def album_candidates(items, artist, album, va_likely): def album_candidates(items, artist, album, va_likely):
"""Search for album matches. ``items`` is a list of Item objects """Search for album matches. ``items`` is a list of Item objects
that make up the album. ``artist`` and ``album`` are the respective that make up the album. ``artist`` and ``album`` are the respective
@ -562,51 +590,42 @@ def album_candidates(items, artist, album, va_likely):
entered by the user. ``va_likely`` is a boolean indicating whether entered by the user. ``va_likely`` is a boolean indicating whether
the album is likely to be a "various artists" release. the album is likely to be a "various artists" release.
""" """
out = []
# Base candidates if we have album and artist to match. # Base candidates if we have album and artist to match.
if artist and album: if artist and album:
try: try:
out.extend(mb.match_album(artist, album, len(items))) for candidate in mb.match_album(artist, album, len(items)):
yield candidate
except mb.MusicBrainzAPIError as exc: except mb.MusicBrainzAPIError as exc:
exc.log(log) exc.log(log)
# Also add VA matches from MusicBrainz where appropriate. # Also add VA matches from MusicBrainz where appropriate.
if va_likely and album: if va_likely and album:
try: try:
out.extend(mb.match_album(None, album, len(items))) for candidate in mb.match_album(None, album, len(items)):
yield candidate
except mb.MusicBrainzAPIError as exc: except mb.MusicBrainzAPIError as exc:
exc.log(log) exc.log(log)
# Candidates from plugins. # Candidates from plugins.
out.extend(plugins.candidates(items, artist, album, va_likely)) for candidate in plugins.candidates(items, artist, album, va_likely):
yield candidate
# Notify subscribed plugins about fetched album info
for a in out:
plugins.send(u'albuminfo_received', info=a)
return out
@plugins.notify_info_yielded(u'trackinfo_received')
def item_candidates(item, artist, title): def item_candidates(item, artist, title):
"""Search for item matches. ``item`` is the Item to be matched. """Search for item matches. ``item`` is the Item to be matched.
``artist`` and ``title`` are strings and either reflect the item or ``artist`` and ``title`` are strings and either reflect the item or
are specified by the user. are specified by the user.
""" """
out = []
# MusicBrainz candidates. # MusicBrainz candidates.
if artist and title: if artist and title:
try: try:
out.extend(mb.match_track(artist, title)) for candidate in mb.match_track(artist, title):
yield candidate
except mb.MusicBrainzAPIError as exc: except mb.MusicBrainzAPIError as exc:
exc.log(log) exc.log(log)
# Plugin candidates. # Plugin candidates.
out.extend(plugins.item_candidates(item, artist, title)) for candidate in plugins.item_candidates(item, artist, title):
yield candidate
# Notify subscribed plugins about fetched track info
for i in out:
plugins.send(u'trackinfo_received', info=i)
return out

View file

@ -22,6 +22,7 @@ from __future__ import division, absolute_import, print_function
import datetime import datetime
import re import re
from munkres import Munkres from munkres import Munkres
from collections import namedtuple
from beets import logging from beets import logging
from beets import plugins from beets import plugins
@ -29,7 +30,6 @@ from beets import config
from beets.util import plurality from beets.util import plurality
from beets.autotag import hooks from beets.autotag import hooks
from beets.util.enumeration import OrderedEnum from beets.util.enumeration import OrderedEnum
from functools import reduce
# Artist signals that indicate "various artists". These are used at the # Artist signals that indicate "various artists". These are used at the
# album level to determine whether a given release is likely a VA # album level to determine whether a given release is likely a VA
@ -53,6 +53,13 @@ class Recommendation(OrderedEnum):
strong = 3 strong = 3
# A structure for holding a set of possible matches to choose between. This
# consists of a list of possible candidates (i.e., AlbumInfo or TrackInfo
# objects) and a recommendation value.
Proposal = namedtuple('Proposal', ('candidates', 'recommendation'))
# Primary matching functionality. # Primary matching functionality.
def current_metadata(items): def current_metadata(items):
@ -96,7 +103,9 @@ def assign_items(items, tracks):
costs.append(row) costs.append(row)
# Find a minimum-cost bipartite matching. # Find a minimum-cost bipartite matching.
log.debug('Computing track assignment...')
matching = Munkres().compute(costs) matching = Munkres().compute(costs)
log.debug('...done.')
# Produce the output matching. # Produce the output matching.
mapping = dict((items[i], tracks[j]) for (i, j) in matching) mapping = dict((items[i], tracks[j]) for (i, j) in matching)
@ -238,7 +247,7 @@ def distance(items, album_info, mapping):
# Tracks. # Tracks.
dist.tracks = {} dist.tracks = {}
for item, track in mapping.iteritems(): for item, track in mapping.items():
dist.tracks[track] = track_distance(item, track, album_info.va) dist.tracks[track] = track_distance(item, track, album_info.va)
dist.add('tracks', dist.tracks[track].distance) dist.add('tracks', dist.tracks[track].distance)
@ -261,19 +270,23 @@ def match_by_id(items):
AlbumInfo object for the corresponding album. Otherwise, returns AlbumInfo object for the corresponding album. Otherwise, returns
None. None.
""" """
# Is there a consensus on the MB album ID? albumids = (item.mb_albumid for item in items if item.mb_albumid)
albumids = [item.mb_albumid for item in items if item.mb_albumid]
if not albumids: # Did any of the items have an MB album ID?
log.debug(u'No album IDs found.') try:
first = next(albumids)
except StopIteration:
log.debug(u'No album ID found.')
return None return None
# If all album IDs are equal, look up the album. # Is there a consensus on the MB album ID?
if bool(reduce(lambda x, y: x if x == y else (), albumids)): for other in albumids:
albumid = albumids[0] if other != first:
log.debug(u'Searching for discovered album ID: {0}', albumid)
return hooks.album_for_mbid(albumid)
else:
log.debug(u'No album ID consensus.') log.debug(u'No album ID consensus.')
return None
# If all album IDs are equal, look up the album.
log.debug(u'Searching for discovered album ID: {0}', first)
return hooks.album_for_mbid(first)
def _recommendation(results): def _recommendation(results):
@ -312,10 +325,10 @@ def _recommendation(results):
keys = set(min_dist.keys()) keys = set(min_dist.keys())
if isinstance(results[0], hooks.AlbumMatch): if isinstance(results[0], hooks.AlbumMatch):
for track_dist in min_dist.tracks.values(): for track_dist in min_dist.tracks.values():
keys.update(track_dist.keys()) keys.update(list(track_dist.keys()))
max_rec_view = config['match']['max_rec'] max_rec_view = config['match']['max_rec']
for key in keys: for key in keys:
if key in max_rec_view.keys(): if key in list(max_rec_view.keys()):
max_rec = max_rec_view[key].as_choice({ max_rec = max_rec_view[key].as_choice({
'strong': Recommendation.strong, 'strong': Recommendation.strong,
'medium': Recommendation.medium, 'medium': Recommendation.medium,
@ -327,13 +340,19 @@ def _recommendation(results):
return rec return rec
def _sort_candidates(candidates):
"""Sort candidates by distance."""
return sorted(candidates, key=lambda match: match.distance)
def _add_candidate(items, results, info): def _add_candidate(items, results, info):
"""Given a candidate AlbumInfo object, attempt to add the candidate """Given a candidate AlbumInfo object, attempt to add the candidate
to the output dictionary of AlbumMatch objects. This involves to the output dictionary of AlbumMatch objects. This involves
checking the track count, ordering the items, checking for checking the track count, ordering the items, checking for
duplicates, and calculating the distance. duplicates, and calculating the distance.
""" """
log.debug(u'Candidate: {0} - {1}', info.artist, info.album) log.debug(u'Candidate: {0} - {1} ({2})',
info.artist, info.album, info.album_id)
# Discard albums with zero tracks. # Discard albums with zero tracks.
if not info.tracks: if not info.tracks:
@ -371,9 +390,8 @@ def _add_candidate(items, results, info):
def tag_album(items, search_artist=None, search_album=None, def tag_album(items, search_artist=None, search_album=None,
search_ids=[]): search_ids=[]):
"""Return a tuple of a artist name, an album name, a list of """Return a tuple of the current artist name, the current album
`AlbumMatch` candidates from the metadata backend, and a name, and a `Proposal` containing `AlbumMatch` candidates.
`Recommendation`.
The artist and album are the most common values of these fields The artist and album are the most common values of these fields
among `items`. among `items`.
@ -401,10 +419,10 @@ def tag_album(items, search_artist=None, search_album=None,
# Search by explicit ID. # Search by explicit ID.
if search_ids: if search_ids:
search_cands = []
for search_id in search_ids: for search_id in search_ids:
log.debug(u'Searching for album ID: {0}', search_id) log.debug(u'Searching for album ID: {0}', search_id)
search_cands.extend(hooks.albums_for_id(search_id)) for id_candidate in hooks.albums_for_id(search_id):
_add_candidate(items, candidates, id_candidate)
# Use existing metadata or text search. # Use existing metadata or text search.
else: else:
@ -412,7 +430,7 @@ def tag_album(items, search_artist=None, search_album=None,
id_info = match_by_id(items) id_info = match_by_id(items)
if id_info: if id_info:
_add_candidate(items, candidates, id_info) _add_candidate(items, candidates, id_info)
rec = _recommendation(candidates.values()) rec = _recommendation(list(candidates.values()))
log.debug(u'Album ID match recommendation is {0}', rec) log.debug(u'Album ID match recommendation is {0}', rec)
if candidates and not config['import']['timid']: if candidates and not config['import']['timid']:
# If we have a very good MBID match, return immediately. # If we have a very good MBID match, return immediately.
@ -420,7 +438,8 @@ def tag_album(items, search_artist=None, search_album=None,
# matches. # matches.
if rec == Recommendation.strong: if rec == Recommendation.strong:
log.debug(u'ID match.') log.debug(u'ID match.')
return cur_artist, cur_album, candidates.values(), rec return cur_artist, cur_album, \
Proposal(list(candidates.values()), rec)
# Search terms. # Search terms.
if not (search_artist and search_album): if not (search_artist and search_album):
@ -435,24 +454,25 @@ def tag_album(items, search_artist=None, search_album=None,
log.debug(u'Album might be VA: {0}', va_likely) log.debug(u'Album might be VA: {0}', va_likely)
# Get the results from the data sources. # Get the results from the data sources.
search_cands = hooks.album_candidates(items, search_artist, for matched_candidate in hooks.album_candidates(items,
search_album, va_likely) search_artist,
search_album,
log.debug(u'Evaluating {0} candidates.', len(search_cands)) va_likely):
for info in search_cands: _add_candidate(items, candidates, matched_candidate)
_add_candidate(items, candidates, info)
log.debug(u'Evaluating {0} candidates.', len(candidates))
# Sort and get the recommendation. # Sort and get the recommendation.
candidates = sorted(candidates.itervalues()) candidates = _sort_candidates(candidates.values())
rec = _recommendation(candidates) rec = _recommendation(candidates)
return cur_artist, cur_album, candidates, rec return cur_artist, cur_album, Proposal(candidates, rec)
def tag_item(item, search_artist=None, search_title=None, def tag_item(item, search_artist=None, search_title=None,
search_ids=[]): search_ids=[]):
"""Attempts to find metadata for a single track. Returns a """Find metadata for a single track. Return a `Proposal` consisting
`(candidates, recommendation)` pair where `candidates` is a list of of `TrackMatch` objects.
TrackMatch objects. `search_artist` and `search_title` may be used
`search_artist` and `search_title` may be used
to override the current metadata for the purposes of the MusicBrainz to override the current metadata for the purposes of the MusicBrainz
title. `search_ids` may be used for restricting the search to a list title. `search_ids` may be used for restricting the search to a list
of metadata backend IDs. of metadata backend IDs.
@ -462,7 +482,7 @@ def tag_item(item, search_artist=None, search_title=None,
candidates = {} candidates = {}
# First, try matching by MusicBrainz ID. # First, try matching by MusicBrainz ID.
trackids = search_ids or filter(None, [item.mb_trackid]) trackids = search_ids or [t for t in [item.mb_trackid] if t]
if trackids: if trackids:
for trackid in trackids: for trackid in trackids:
log.debug(u'Searching for track ID: {0}', trackid) log.debug(u'Searching for track ID: {0}', trackid)
@ -471,18 +491,18 @@ def tag_item(item, search_artist=None, search_title=None,
candidates[track_info.track_id] = \ candidates[track_info.track_id] = \
hooks.TrackMatch(dist, track_info) hooks.TrackMatch(dist, track_info)
# If this is a good match, then don't keep searching. # If this is a good match, then don't keep searching.
rec = _recommendation(sorted(candidates.itervalues())) rec = _recommendation(_sort_candidates(candidates.values()))
if rec == Recommendation.strong and \ if rec == Recommendation.strong and \
not config['import']['timid']: not config['import']['timid']:
log.debug(u'Track ID match.') log.debug(u'Track ID match.')
return sorted(candidates.itervalues()), rec return Proposal(_sort_candidates(candidates.values()), rec)
# If we're searching by ID, don't proceed. # If we're searching by ID, don't proceed.
if search_ids: if search_ids:
if candidates: if candidates:
return sorted(candidates.itervalues()), rec return Proposal(_sort_candidates(candidates.values()), rec)
else: else:
return [], Recommendation.none return Proposal([], Recommendation.none)
# Search terms. # Search terms.
if not (search_artist and search_title): if not (search_artist and search_title):
@ -496,6 +516,6 @@ def tag_item(item, search_artist=None, search_title=None,
# Sort by distance and return with recommendation. # Sort by distance and return with recommendation.
log.debug(u'Found {0} candidates.', len(candidates)) log.debug(u'Found {0} candidates.', len(candidates))
candidates = sorted(candidates.itervalues()) candidates = _sort_candidates(candidates.values())
rec = _recommendation(candidates) rec = _recommendation(candidates)
return candidates, rec return Proposal(candidates, rec)

View file

@ -20,17 +20,24 @@ from __future__ import division, absolute_import, print_function
import musicbrainzngs import musicbrainzngs
import re import re
import traceback import traceback
from urlparse import urljoin from six.moves.urllib.parse import urljoin
from beets import logging from beets import logging
import beets.autotag.hooks import beets.autotag.hooks
import beets import beets
from beets import util from beets import util
from beets import config from beets import config
import six
VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377' VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377'
if util.SNI_SUPPORTED:
BASE_URL = 'https://musicbrainz.org/'
else:
BASE_URL = 'http://musicbrainz.org/' BASE_URL = 'http://musicbrainz.org/'
SKIPPED_TRACKS = ['[data track]']
musicbrainzngs.set_useragent('beets', beets.__version__, musicbrainzngs.set_useragent('beets', beets.__version__,
'http://beets.io/') 'http://beets.io/')
@ -53,8 +60,12 @@ class MusicBrainzAPIError(util.HumanReadableException):
log = logging.getLogger('beets') log = logging.getLogger('beets')
RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups', RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups',
'labels', 'artist-credits', 'aliases'] 'labels', 'artist-credits', 'aliases',
'recording-level-rels', 'work-rels',
'work-level-rels', 'artist-rels']
TRACK_INCLUDES = ['artists', 'aliases'] TRACK_INCLUDES = ['artists', 'aliases']
if 'work-level-rels' in musicbrainzngs.VALID_INCLUDES['recording']:
TRACK_INCLUDES += ['work-level-rels', 'artist-rels']
def track_url(trackid): def track_url(trackid):
@ -69,7 +80,8 @@ def configure():
"""Set up the python-musicbrainz-ngs module according to settings """Set up the python-musicbrainz-ngs module according to settings
from the beets configuration. This should be called at startup. from the beets configuration. This should be called at startup.
""" """
musicbrainzngs.set_hostname(config['musicbrainz']['host'].get(unicode)) hostname = config['musicbrainz']['host'].as_str()
musicbrainzngs.set_hostname(hostname)
musicbrainzngs.set_rate_limit( musicbrainzngs.set_rate_limit(
config['musicbrainz']['ratelimit_interval'].as_number(), config['musicbrainz']['ratelimit_interval'].as_number(),
config['musicbrainz']['ratelimit'].get(int), config['musicbrainz']['ratelimit'].get(int),
@ -99,6 +111,24 @@ def _preferred_alias(aliases):
return matches[0] return matches[0]
def _preferred_release_event(release):
"""Given a release, select and return the user's preferred release
event as a tuple of (country, release_date). Fall back to the
default release event if a preferred event is not found.
"""
countries = config['match']['preferred']['countries'].as_str_seq()
for country in countries:
for event in release.get('release-event-list', {}):
try:
if country in event['area']['iso-3166-1-code-list']:
return country, event['date']
except KeyError:
pass
return release.get('country'), release.get('date')
def _flatten_artist_credit(credit): def _flatten_artist_credit(credit):
"""Given a list representing an ``artist-credit`` block, flatten the """Given a list representing an ``artist-credit`` block, flatten the
data into a triple of joined artist name strings: canonical, sort, and data into a triple of joined artist name strings: canonical, sort, and
@ -108,7 +138,7 @@ def _flatten_artist_credit(credit):
artist_sort_parts = [] artist_sort_parts = []
artist_credit_parts = [] artist_credit_parts = []
for el in credit: for el in credit:
if isinstance(el, basestring): if isinstance(el, six.string_types):
# Join phrase. # Join phrase.
artist_parts.append(el) artist_parts.append(el)
artist_credit_parts.append(el) artist_credit_parts.append(el)
@ -177,6 +207,37 @@ def track_info(recording, index=None, medium=None, medium_index=None,
if recording.get('length'): if recording.get('length'):
info.length = int(recording['length']) / (1000.0) info.length = int(recording['length']) / (1000.0)
lyricist = []
composer = []
composer_sort = []
for work_relation in recording.get('work-relation-list', ()):
if work_relation['type'] != 'performance':
continue
for artist_relation in work_relation['work'].get(
'artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'lyricist':
lyricist.append(artist_relation['artist']['name'])
elif type == 'composer':
composer.append(artist_relation['artist']['name'])
composer_sort.append(
artist_relation['artist']['sort-name'])
if lyricist:
info.lyricist = u', '.join(lyricist)
if composer:
info.composer = u', '.join(composer)
info.composer_sort = u', '.join(composer_sort)
arranger = []
for artist_relation in recording.get('artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'arranger':
arranger.append(artist_relation['artist']['name'])
if arranger:
info.arranger = u', '.join(arranger)
info.decode() info.decode()
return info return info
@ -216,11 +277,28 @@ def album_info(release):
disctitle = medium.get('title') disctitle = medium.get('title')
format = medium.get('format') format = medium.get('format')
if format in config['match']['ignored_media'].as_str_seq():
continue
all_tracks = medium['track-list'] all_tracks = medium['track-list']
if 'data-track-list' in medium:
all_tracks += medium['data-track-list']
track_count = len(all_tracks)
if 'pregap' in medium: if 'pregap' in medium:
all_tracks.insert(0, medium['pregap']) all_tracks.insert(0, medium['pregap'])
for track in all_tracks: for track in all_tracks:
if ('title' in track['recording'] and
track['recording']['title'] in SKIPPED_TRACKS):
continue
if ('video' in track['recording'] and
track['recording']['video'] == 'true' and
config['match']['ignore_video_tracks']):
continue
# Basic information from the recording. # Basic information from the recording.
index += 1 index += 1
ti = track_info( ti = track_info(
@ -228,10 +306,12 @@ def album_info(release):
index, index,
int(medium['position']), int(medium['position']),
int(track['position']), int(track['position']),
len(medium['track-list']), track_count,
) )
ti.release_track_id = track['id']
ti.disctitle = disctitle ti.disctitle = disctitle
ti.media = format ti.media = format
ti.track_alt = track['number']
# Prefer track data, where present, over recording data. # Prefer track data, where present, over recording data.
if track.get('title'): if track.get('title'):
@ -260,10 +340,9 @@ def album_info(release):
) )
info.va = info.artist_id == VARIOUS_ARTISTS_ID info.va = info.artist_id == VARIOUS_ARTISTS_ID
if info.va: if info.va:
info.artist = config['va_name'].get(unicode) info.artist = config['va_name'].as_str()
info.asin = release.get('asin') info.asin = release.get('asin')
info.releasegroup_id = release['release-group']['id'] info.releasegroup_id = release['release-group']['id']
info.country = release.get('country')
info.albumstatus = release.get('status') info.albumstatus = release.get('status')
# Build up the disambiguation string from the release group and release. # Build up the disambiguation string from the release group and release.
@ -274,14 +353,28 @@ def album_info(release):
disambig.append(release.get('disambiguation')) disambig.append(release.get('disambiguation'))
info.albumdisambig = u', '.join(disambig) info.albumdisambig = u', '.join(disambig)
# Release type not always populated. # Get the "classic" Release type. This data comes from a legacy API
# feature before MusicBrainz supported multiple release types.
if 'type' in release['release-group']: if 'type' in release['release-group']:
reltype = release['release-group']['type'] reltype = release['release-group']['type']
if reltype: if reltype:
info.albumtype = reltype.lower() info.albumtype = reltype.lower()
# Release dates. # Log the new-style "primary" and "secondary" release types.
release_date = release.get('date') # Eventually, we'd like to actually store this data, but we just log
# it for now to help understand the differences.
if 'primary-type' in release['release-group']:
rel_primarytype = release['release-group']['primary-type']
if rel_primarytype:
log.debug('primary MB release type: ' + rel_primarytype.lower())
if 'secondary-type-list' in release['release-group']:
if release['release-group']['secondary-type-list']:
log.debug('secondary MB release type(s): ' + ', '.join(
[secondarytype.lower() for secondarytype in
release['release-group']['secondary-type-list']]))
# Release events.
info.country, release_date = _preferred_release_event(release)
release_group_date = release['release-group'].get('first-release-date') release_group_date = release['release-group'].get('first-release-date')
if not release_date: if not release_date:
# Fall back if release-specific date is not available. # Fall back if release-specific date is not available.
@ -329,13 +422,14 @@ def match_album(artist, album, tracks=None):
# Various Artists search. # Various Artists search.
criteria['arid'] = VARIOUS_ARTISTS_ID criteria['arid'] = VARIOUS_ARTISTS_ID
if tracks is not None: if tracks is not None:
criteria['tracks'] = unicode(tracks) criteria['tracks'] = six.text_type(tracks)
# Abort if we have no search terms. # Abort if we have no search terms.
if not any(criteria.itervalues()): if not any(criteria.values()):
return return
try: try:
log.debug(u'Searching for MusicBrainz releases with: {!r}', criteria)
res = musicbrainzngs.search_releases( res = musicbrainzngs.search_releases(
limit=config['musicbrainz']['searchlimit'].get(int), **criteria) limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
except musicbrainzngs.MusicBrainzError as exc: except musicbrainzngs.MusicBrainzError as exc:
@ -358,7 +452,7 @@ def match_track(artist, title):
'recording': title.lower().strip(), 'recording': title.lower().strip(),
} }
if not any(criteria.itervalues()): if not any(criteria.values()):
return return
try: try:
@ -376,7 +470,7 @@ def _parse_id(s):
no ID can be found, return None. no ID can be found, return None.
""" """
# Find the first thing that looks like a UUID/MBID. # Find the first thing that looks like a UUID/MBID.
match = re.search(ur'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s) match = re.search(u'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
if match: if match:
return match.group() return match.group()
@ -386,6 +480,7 @@ def album_for_id(releaseid):
object or None if the album is not found. May raise a object or None if the album is not found. May raise a
MusicBrainzAPIError. MusicBrainzAPIError.
""" """
log.debug(u'Requesting MusicBrainz release {}', releaseid)
albumid = _parse_id(releaseid) albumid = _parse_id(releaseid)
if not albumid: if not albumid:
log.debug(u'Invalid MBID ({0}).', releaseid) log.debug(u'Invalid MBID ({0}).', releaseid)

View file

@ -6,9 +6,12 @@ import:
copy: yes copy: yes
move: no move: no
link: no link: no
hardlink: no
delete: no delete: no
resume: ask resume: ask
incremental: no incremental: no
incremental_skip_later: no
from_scratch: no
quiet_fallback: skip quiet_fallback: skip
none_rec_action: ask none_rec_action: ask
timid: no timid: no
@ -23,6 +26,9 @@ import:
group_albums: no group_albums: no
pretend: false pretend: false
search_ids: [] search_ids: []
duplicate_action: ask
bell: no
set_fields: {}
clutter: ["Thumbs.DB", ".DS_Store"] clutter: ["Thumbs.DB", ".DS_Store"]
ignore: [".*", "*~", "System Volume Information", "lost+found"] ignore: [".*", "*~", "System Volume Information", "lost+found"]
@ -36,6 +42,7 @@ replace:
'\.$': _ '\.$': _
'\s+$': '' '\s+$': ''
'^\s+': '' '^\s+': ''
'^-': _
path_sep_replace: _ path_sep_replace: _
asciify_paths: false asciify_paths: false
art_filename: cover art_filename: cover
@ -49,6 +56,7 @@ per_disc_numbering: no
verbose: 0 verbose: 0
terminal_encoding: terminal_encoding:
original_date: no original_date: no
artist_credit: no
id3v23: no id3v23: no
va_name: "Various Artists" va_name: "Various Artists"
@ -120,5 +128,7 @@ match:
original_year: no original_year: no
ignored: [] ignored: []
required: [] required: []
ignored_media: []
ignore_video_tracks: yes
track_length_grace: 10 track_length_grace: 10
track_length_max: 30 track_length_max: 30

View file

@ -27,8 +27,19 @@ import collections
import beets import beets
from beets.util.functemplate import Template from beets.util.functemplate import Template
from beets.util import py3_path
from beets.dbcore import types from beets.dbcore import types
from .query import MatchQuery, NullSort, TrueQuery from .query import MatchQuery, NullSort, TrueQuery
import six
class DBAccessError(Exception):
"""The SQLite database became inaccessible.
This can happen when trying to read or write the database when, for
example, the database file is deleted or otherwise disappears. There
is probably no way to recover from this error.
"""
class FormattedMapping(collections.Mapping): class FormattedMapping(collections.Mapping):
@ -66,10 +77,10 @@ class FormattedMapping(collections.Mapping):
def _get_formatted(self, model, key): def _get_formatted(self, model, key):
value = model._type(key).format(model.get(key)) value = model._type(key).format(model.get(key))
if isinstance(value, bytes): if isinstance(value, bytes):
value = value.decode('utf8', 'ignore') value = value.decode('utf-8', 'ignore')
if self.for_path: if self.for_path:
sep_repl = beets.config['path_sep_replace'].get(unicode) sep_repl = beets.config['path_sep_replace'].as_str()
for sep in (os.path.sep, os.path.altsep): for sep in (os.path.sep, os.path.altsep):
if sep: if sep:
value = value.replace(sep, sep_repl) value = value.replace(sep, sep_repl)
@ -176,9 +187,9 @@ class Model(object):
ordinary construction are bypassed. ordinary construction are bypassed.
""" """
obj = cls(db) obj = cls(db)
for key, value in fixed_values.iteritems(): for key, value in fixed_values.items():
obj._values_fixed[key] = cls._type(key).from_sql(value) obj._values_fixed[key] = cls._type(key).from_sql(value)
for key, value in flex_values.iteritems(): for key, value in flex_values.items():
obj._values_flex[key] = cls._type(key).from_sql(value) obj._values_flex[key] = cls._type(key).from_sql(value)
return obj return obj
@ -206,6 +217,21 @@ class Model(object):
if need_id and not self.id: if need_id and not self.id:
raise ValueError(u'{0} has no id'.format(type(self).__name__)) raise ValueError(u'{0} has no id'.format(type(self).__name__))
def copy(self):
"""Create a copy of the model object.
The field values and other state is duplicated, but the new copy
remains associated with the same database as the old object.
(A simple `copy.deepcopy` will not work because it would try to
duplicate the SQLite connection.)
"""
new = self.__class__()
new._db = self._db
new._values_fixed = self._values_fixed.copy()
new._values_flex = self._values_flex.copy()
new._dirty = self._dirty.copy()
return new
# Essential field accessors. # Essential field accessors.
@classmethod @classmethod
@ -225,14 +251,15 @@ class Model(object):
if key in getters: # Computed. if key in getters: # Computed.
return getters[key](self) return getters[key](self)
elif key in self._fields: # Fixed. elif key in self._fields: # Fixed.
return self._values_fixed.get(key) return self._values_fixed.get(key, self._type(key).null)
elif key in self._values_flex: # Flexible. elif key in self._values_flex: # Flexible.
return self._values_flex[key] return self._values_flex[key]
else: else:
raise KeyError(key) raise KeyError(key)
def __setitem__(self, key, value): def _setitem(self, key, value):
"""Assign the value for a field. """Assign the value for a field, return whether new and old value
differ.
""" """
# Choose where to place the value. # Choose where to place the value.
if key in self._fields: if key in self._fields:
@ -246,9 +273,17 @@ class Model(object):
# Assign value and possibly mark as dirty. # Assign value and possibly mark as dirty.
old_value = source.get(key) old_value = source.get(key)
source[key] = value source[key] = value
if self._always_dirty or old_value != value: changed = old_value != value
if self._always_dirty or changed:
self._dirty.add(key) self._dirty.add(key)
return changed
def __setitem__(self, key, value):
"""Assign the value for a field.
"""
self._setitem(key, value)
def __delitem__(self, key): def __delitem__(self, key):
"""Remove a flexible attribute from the model. """Remove a flexible attribute from the model.
""" """
@ -267,9 +302,9 @@ class Model(object):
`computed` parameter controls whether computed (plugin-provided) `computed` parameter controls whether computed (plugin-provided)
fields are included in the key list. fields are included in the key list.
""" """
base_keys = list(self._fields) + self._values_flex.keys() base_keys = list(self._fields) + list(self._values_flex.keys())
if computed: if computed:
return base_keys + self._getters().keys() return base_keys + list(self._getters().keys())
else: else:
return base_keys return base_keys
@ -278,7 +313,7 @@ class Model(object):
"""Get a list of available keys for objects of this type. """Get a list of available keys for objects of this type.
Includes fixed and computed fields. Includes fixed and computed fields.
""" """
return list(cls._fields) + cls._getters().keys() return list(cls._fields) + list(cls._getters().keys())
# Act like a dictionary. # Act like a dictionary.
@ -340,15 +375,19 @@ class Model(object):
# Database interaction (CRUD methods). # Database interaction (CRUD methods).
def store(self): def store(self, fields=None):
"""Save the object's metadata into the library database. """Save the object's metadata into the library database.
:param fields: the fields to be stored. If not specified, all fields
will be.
""" """
if fields is None:
fields = self._fields
self._check_db() self._check_db()
# Build assignments for query. # Build assignments for query.
assignments = [] assignments = []
subvars = [] subvars = []
for key in self._fields: for key in fields:
if key != 'id' and key in self._dirty: if key != 'id' and key in self._dirty:
self._dirty.remove(key) self._dirty.remove(key)
assignments.append(key + '=?') assignments.append(key + '=?')
@ -452,7 +491,7 @@ class Model(object):
separators will be added to the template. separators will be added to the template.
""" """
# Perform substitution. # Perform substitution.
if isinstance(template, basestring): if isinstance(template, six.string_types):
template = Template(template) template = Template(template)
return template.substitute(self.formatted(for_path), return template.substitute(self.formatted(for_path),
self._template_funcs()) self._template_funcs())
@ -463,7 +502,7 @@ class Model(object):
def _parse(cls, key, string): def _parse(cls, key, string):
"""Parse a string as a value for the given key. """Parse a string as a value for the given key.
""" """
if not isinstance(string, basestring): if not isinstance(string, six.string_types):
raise TypeError(u"_parse() argument must be a string") raise TypeError(u"_parse() argument must be a string")
return cls._type(key).parse(string) return cls._type(key).parse(string)
@ -593,6 +632,11 @@ class Results(object):
return self._row_count return self._row_count
def __nonzero__(self): def __nonzero__(self):
"""Does this result contain any objects?
"""
return self.__bool__()
def __bool__(self):
"""Does this result contain any objects? """Does this result contain any objects?
""" """
return bool(len(self)) return bool(len(self))
@ -669,8 +713,18 @@ class Transaction(object):
"""Execute an SQL statement with substitution values and return """Execute an SQL statement with substitution values and return
the row ID of the last affected row. the row ID of the last affected row.
""" """
try:
cursor = self.db._connection().execute(statement, subvals) cursor = self.db._connection().execute(statement, subvals)
return cursor.lastrowid return cursor.lastrowid
except sqlite3.OperationalError as e:
# In two specific cases, SQLite reports an error while accessing
# the underlying database file. We surface these exceptions as
# DBAccessError so the application can abort.
if e.args[0] in ("attempt to write a readonly database",
"unable to open database file"):
raise DBAccessError(e.args[0])
else:
raise
def script(self, statements): def script(self, statements):
"""Execute a string containing multiple SQL statements.""" """Execute a string containing multiple SQL statements."""
@ -685,8 +739,9 @@ class Database(object):
"""The Model subclasses representing tables in this database. """The Model subclasses representing tables in this database.
""" """
def __init__(self, path): def __init__(self, path, timeout=5.0):
self.path = path self.path = path
self.timeout = timeout
self._connections = {} self._connections = {}
self._tx_stacks = defaultdict(list) self._tx_stacks = defaultdict(list)
@ -721,18 +776,36 @@ class Database(object):
if thread_id in self._connections: if thread_id in self._connections:
return self._connections[thread_id] return self._connections[thread_id]
else: else:
# Make a new connection. conn = self._create_connection()
self._connections[thread_id] = conn
return conn
def _create_connection(self):
"""Create a SQLite connection to the underlying database.
Makes a new connection every time. If you need to configure the
connection settings (e.g., add custom functions), override this
method.
"""
# Make a new connection. The `sqlite3` module can't use
# bytestring paths here on Python 3, so we need to
# provide a `str` using `py3_path`.
conn = sqlite3.connect( conn = sqlite3.connect(
self.path, py3_path(self.path), timeout=self.timeout
timeout=beets.config['timeout'].as_number(),
) )
# Access SELECT results like dictionaries. # Access SELECT results like dictionaries.
conn.row_factory = sqlite3.Row conn.row_factory = sqlite3.Row
self._connections[thread_id] = conn
return conn return conn
def _close(self):
"""Close the all connections to the underlying SQLite database
from all threads. This does not render the database object
unusable; new connections can still be opened on demand.
"""
with self._shared_map_lock:
self._connections.clear()
@contextlib.contextmanager @contextlib.contextmanager
def _tx_stack(self): def _tx_stack(self):
"""A context manager providing access to the current thread's """A context manager providing access to the current thread's

View file

@ -23,6 +23,10 @@ from beets import util
from datetime import datetime, timedelta from datetime import datetime, timedelta
import unicodedata import unicodedata
from functools import reduce from functools import reduce
import six
if not six.PY2:
buffer = memoryview # sqlite won't accept memoryview in python 2
class ParsingError(ValueError): class ParsingError(ValueError):
@ -36,6 +40,7 @@ class InvalidQueryError(ParsingError):
The query should be a unicode string or a list, which will be space-joined. The query should be a unicode string or a list, which will be space-joined.
""" """
def __init__(self, query, explanation): def __init__(self, query, explanation):
if isinstance(query, list): if isinstance(query, list):
query = " ".join(query) query = " ".join(query)
@ -43,22 +48,24 @@ class InvalidQueryError(ParsingError):
super(InvalidQueryError, self).__init__(message) super(InvalidQueryError, self).__init__(message)
class InvalidQueryArgumentTypeError(ParsingError): class InvalidQueryArgumentValueError(ParsingError):
"""Represent a query argument that could not be converted as expected. """Represent a query argument that could not be converted as expected.
It exists to be caught in upper stack levels so a meaningful (i.e. with the It exists to be caught in upper stack levels so a meaningful (i.e. with the
query) InvalidQueryError can be raised. query) InvalidQueryError can be raised.
""" """
def __init__(self, what, expected, detail=None): def __init__(self, what, expected, detail=None):
message = u"'{0}' is not {1}".format(what, expected) message = u"'{0}' is not {1}".format(what, expected)
if detail: if detail:
message = u"{0}: {1}".format(message, detail) message = u"{0}: {1}".format(message, detail)
super(InvalidQueryArgumentTypeError, self).__init__(message) super(InvalidQueryArgumentValueError, self).__init__(message)
class Query(object): class Query(object):
"""An abstract class representing a query into the item database. """An abstract class representing a query into the item database.
""" """
def clause(self): def clause(self):
"""Generate an SQLite expression implementing the query. """Generate an SQLite expression implementing the query.
@ -91,6 +98,7 @@ class FieldQuery(Query):
string. Subclasses may also provide `col_clause` to implement the string. Subclasses may also provide `col_clause` to implement the
same matching functionality in SQLite. same matching functionality in SQLite.
""" """
def __init__(self, field, pattern, fast=True): def __init__(self, field, pattern, fast=True):
self.field = field self.field = field
self.pattern = pattern self.pattern = pattern
@ -130,6 +138,7 @@ class FieldQuery(Query):
class MatchQuery(FieldQuery): class MatchQuery(FieldQuery):
"""A query that looks for exact matches in an item field.""" """A query that looks for exact matches in an item field."""
def col_clause(self): def col_clause(self):
return self.field + " = ?", [self.pattern] return self.field + " = ?", [self.pattern]
@ -139,6 +148,7 @@ class MatchQuery(FieldQuery):
class NoneQuery(FieldQuery): class NoneQuery(FieldQuery):
"""A query that checks whether a field is null."""
def __init__(self, field, fast=True): def __init__(self, field, fast=True):
super(NoneQuery, self).__init__(field, None, fast) super(NoneQuery, self).__init__(field, None, fast)
@ -161,6 +171,7 @@ class StringFieldQuery(FieldQuery):
"""A FieldQuery that converts values to strings before matching """A FieldQuery that converts values to strings before matching
them. them.
""" """
@classmethod @classmethod
def value_match(cls, pattern, value): def value_match(cls, pattern, value):
"""Determine whether the value matches the pattern. The value """Determine whether the value matches the pattern. The value
@ -178,6 +189,7 @@ class StringFieldQuery(FieldQuery):
class SubstringQuery(StringFieldQuery): class SubstringQuery(StringFieldQuery):
"""A query that matches a substring in a specific item field.""" """A query that matches a substring in a specific item field."""
def col_clause(self): def col_clause(self):
pattern = (self.pattern pattern = (self.pattern
.replace('\\', '\\\\') .replace('\\', '\\\\')
@ -200,6 +212,7 @@ class RegexpQuery(StringFieldQuery):
Raises InvalidQueryError when the pattern is not a valid regular Raises InvalidQueryError when the pattern is not a valid regular
expression. expression.
""" """
def __init__(self, field, pattern, fast=True): def __init__(self, field, pattern, fast=True):
super(RegexpQuery, self).__init__(field, pattern, fast) super(RegexpQuery, self).__init__(field, pattern, fast)
pattern = self._normalize(pattern) pattern = self._normalize(pattern)
@ -207,7 +220,7 @@ class RegexpQuery(StringFieldQuery):
self.pattern = re.compile(self.pattern) self.pattern = re.compile(self.pattern)
except re.error as exc: except re.error as exc:
# Invalid regular expression. # Invalid regular expression.
raise InvalidQueryArgumentTypeError(pattern, raise InvalidQueryArgumentValueError(pattern,
u"a regular expression", u"a regular expression",
format(exc)) format(exc))
@ -227,9 +240,10 @@ class BooleanQuery(MatchQuery):
"""Matches a boolean field. Pattern should either be a boolean or a """Matches a boolean field. Pattern should either be a boolean or a
string reflecting a boolean. string reflecting a boolean.
""" """
def __init__(self, field, pattern, fast=True): def __init__(self, field, pattern, fast=True):
super(BooleanQuery, self).__init__(field, pattern, fast) super(BooleanQuery, self).__init__(field, pattern, fast)
if isinstance(pattern, basestring): if isinstance(pattern, six.string_types):
self.pattern = util.str2bool(pattern) self.pattern = util.str2bool(pattern)
self.pattern = int(self.pattern) self.pattern = int(self.pattern)
@ -240,17 +254,16 @@ class BytesQuery(MatchQuery):
`unicode` equivalently in Python 2. Always use this query instead of `unicode` equivalently in Python 2. Always use this query instead of
`MatchQuery` when matching on BLOB values. `MatchQuery` when matching on BLOB values.
""" """
def __init__(self, field, pattern): def __init__(self, field, pattern):
super(BytesQuery, self).__init__(field, pattern) super(BytesQuery, self).__init__(field, pattern)
# Use a buffer representation of the pattern for SQLite # Use a buffer/memoryview representation of the pattern for SQLite
# matching. This instructs SQLite to treat the blob as binary # matching. This instructs SQLite to treat the blob as binary
# rather than encoded Unicode. # rather than encoded Unicode.
if isinstance(self.pattern, basestring): if isinstance(self.pattern, (six.text_type, bytes)):
# Implicitly coerce Unicode strings to their bytes if isinstance(self.pattern, six.text_type):
# equivalents. self.pattern = self.pattern.encode('utf-8')
if isinstance(self.pattern, unicode):
self.pattern = self.pattern.encode('utf8')
self.buf_pattern = buffer(self.pattern) self.buf_pattern = buffer(self.pattern)
elif isinstance(self.pattern, buffer): elif isinstance(self.pattern, buffer):
self.buf_pattern = self.pattern self.buf_pattern = self.pattern
@ -268,6 +281,7 @@ class NumericQuery(FieldQuery):
Raises InvalidQueryError when the pattern does not represent an int or Raises InvalidQueryError when the pattern does not represent an int or
a float. a float.
""" """
def _convert(self, s): def _convert(self, s):
"""Convert a string to a numeric type (float or int). """Convert a string to a numeric type (float or int).
@ -283,7 +297,7 @@ class NumericQuery(FieldQuery):
try: try:
return float(s) return float(s)
except ValueError: except ValueError:
raise InvalidQueryArgumentTypeError(s, u"an int or a float") raise InvalidQueryArgumentValueError(s, u"an int or a float")
def __init__(self, field, pattern, fast=True): def __init__(self, field, pattern, fast=True):
super(NumericQuery, self).__init__(field, pattern, fast) super(NumericQuery, self).__init__(field, pattern, fast)
@ -304,7 +318,7 @@ class NumericQuery(FieldQuery):
if self.field not in item: if self.field not in item:
return False return False
value = item[self.field] value = item[self.field]
if isinstance(value, basestring): if isinstance(value, six.string_types):
value = self._convert(value) value = self._convert(value)
if self.point is not None: if self.point is not None:
@ -335,6 +349,7 @@ class CollectionQuery(Query):
"""An abstract query class that aggregates other queries. Can be """An abstract query class that aggregates other queries. Can be
indexed like a list to access the sub-queries. indexed like a list to access the sub-queries.
""" """
def __init__(self, subqueries=()): def __init__(self, subqueries=()):
self.subqueries = subqueries self.subqueries = subqueries
@ -387,6 +402,7 @@ class AnyFieldQuery(CollectionQuery):
any field. The individual field query class is provided to the any field. The individual field query class is provided to the
constructor. constructor.
""" """
def __init__(self, pattern, fields, cls): def __init__(self, pattern, fields, cls):
self.pattern = pattern self.pattern = pattern
self.fields = fields self.fields = fields
@ -422,6 +438,7 @@ class MutableCollectionQuery(CollectionQuery):
"""A collection query whose subqueries may be modified after the """A collection query whose subqueries may be modified after the
query is initialized. query is initialized.
""" """
def __setitem__(self, key, value): def __setitem__(self, key, value):
self.subqueries[key] = value self.subqueries[key] = value
@ -431,6 +448,7 @@ class MutableCollectionQuery(CollectionQuery):
class AndQuery(MutableCollectionQuery): class AndQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries.""" """A conjunction of a list of other queries."""
def clause(self): def clause(self):
return self.clause_with_joiner('and') return self.clause_with_joiner('and')
@ -440,6 +458,7 @@ class AndQuery(MutableCollectionQuery):
class OrQuery(MutableCollectionQuery): class OrQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries.""" """A conjunction of a list of other queries."""
def clause(self): def clause(self):
return self.clause_with_joiner('or') return self.clause_with_joiner('or')
@ -451,6 +470,7 @@ class NotQuery(Query):
"""A query that matches the negation of its `subquery`, as a shorcut for """A query that matches the negation of its `subquery`, as a shorcut for
performing `not(subquery)` without using regular expressions. performing `not(subquery)` without using regular expressions.
""" """
def __init__(self, subquery): def __init__(self, subquery):
self.subquery = subquery self.subquery = subquery
@ -479,6 +499,7 @@ class NotQuery(Query):
class TrueQuery(Query): class TrueQuery(Query):
"""A query that always matches.""" """A query that always matches."""
def clause(self): def clause(self):
return '1', () return '1', ()
@ -488,6 +509,7 @@ class TrueQuery(Query):
class FalseQuery(Query): class FalseQuery(Query):
"""A query that never matches.""" """A query that never matches."""
def clause(self): def clause(self):
return '0', () return '0', ()
@ -501,6 +523,10 @@ def _to_epoch_time(date):
"""Convert a `datetime` object to an integer number of seconds since """Convert a `datetime` object to an integer number of seconds since
the (local) Unix epoch. the (local) Unix epoch.
""" """
if hasattr(date, 'timestamp'):
# The `timestamp` method exists on Python 3.3+.
return int(date.timestamp())
else:
epoch = datetime.fromtimestamp(0) epoch = datetime.fromtimestamp(0)
delta = date - epoch delta = date - epoch
return int(delta.total_seconds()) return int(delta.total_seconds())
@ -527,12 +553,23 @@ class Period(object):
instants of time during January 2014. instants of time during January 2014.
""" """
precisions = ('year', 'month', 'day') precisions = ('year', 'month', 'day', 'hour', 'minute', 'second')
date_formats = ('%Y', '%Y-%m', '%Y-%m-%d') date_formats = (
('%Y',), # year
('%Y-%m',), # month
('%Y-%m-%d',), # day
('%Y-%m-%dT%H', '%Y-%m-%d %H'), # hour
('%Y-%m-%dT%H:%M', '%Y-%m-%d %H:%M'), # minute
('%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S') # second
)
relative_units = {'y': 365, 'm': 30, 'w': 7, 'd': 1}
relative_re = '(?P<sign>[+|-]?)(?P<quantity>[0-9]+)' + \
'(?P<timespan>[y|m|w|d])'
def __init__(self, date, precision): def __init__(self, date, precision):
"""Create a period with the given date (a `datetime` object) and """Create a period with the given date (a `datetime` object) and
precision (a string, one of "year", "month", or "day"). precision (a string, one of "year", "month", "day", "hour", "minute",
or "second").
""" """
if precision not in Period.precisions: if precision not in Period.precisions:
raise ValueError(u'Invalid precision {0}'.format(precision)) raise ValueError(u'Invalid precision {0}'.format(precision))
@ -542,20 +579,55 @@ class Period(object):
@classmethod @classmethod
def parse(cls, string): def parse(cls, string):
"""Parse a date and return a `Period` object or `None` if the """Parse a date and return a `Period` object or `None` if the
string is empty. string is empty, or raise an InvalidQueryArgumentValueError if
the string cannot be parsed to a date.
The date may be absolute or relative. Absolute dates look like
`YYYY`, or `YYYY-MM-DD`, or `YYYY-MM-DD HH:MM:SS`, etc. Relative
dates have three parts:
- Optionally, a ``+`` or ``-`` sign indicating the future or the
past. The default is the future.
- A number: how much to add or subtract.
- A letter indicating the unit: days, weeks, months or years
(``d``, ``w``, ``m`` or ``y``). A "month" is exactly 30 days
and a "year" is exactly 365 days.
""" """
if not string:
return None def find_date_and_format(string):
ordinal = string.count('-') for ord, format in enumerate(cls.date_formats):
if ordinal >= len(cls.date_formats): for format_option in format:
# Too many components.
return None
date_format = cls.date_formats[ordinal]
try: try:
date = datetime.strptime(string, date_format) date = datetime.strptime(string, format_option)
return date, ord
except ValueError: except ValueError:
# Parsing failed. # Parsing failed.
pass
return (None, None)
if not string:
return None return None
# Check for a relative date.
match_dq = re.match(cls.relative_re, string)
if match_dq:
sign = match_dq.group('sign')
quantity = match_dq.group('quantity')
timespan = match_dq.group('timespan')
# Add or subtract the given amount of time from the current
# date.
multiplier = -1 if sign == '-' else 1
days = cls.relative_units[timespan]
date = datetime.now() + \
timedelta(days=int(quantity) * days) * multiplier
return cls(date, cls.precisions[5])
# Check for an absolute date.
date, ordinal = find_date_and_format(string)
if date is None:
raise InvalidQueryArgumentValueError(string,
'a valid date/time string')
precision = cls.precisions[ordinal] precision = cls.precisions[ordinal]
return cls(date, precision) return cls(date, precision)
@ -574,6 +646,12 @@ class Period(object):
return date.replace(year=date.year + 1, month=1) return date.replace(year=date.year + 1, month=1)
elif 'day' == precision: elif 'day' == precision:
return date + timedelta(days=1) return date + timedelta(days=1)
elif 'hour' == precision:
return date + timedelta(hours=1)
elif 'minute' == precision:
return date + timedelta(minutes=1)
elif 'second' == precision:
return date + timedelta(seconds=1)
else: else:
raise ValueError(u'unhandled precision {0}'.format(precision)) raise ValueError(u'unhandled precision {0}'.format(precision))
@ -620,14 +698,17 @@ class DateQuery(FieldQuery):
The value of a date field can be matched against a date interval by The value of a date field can be matched against a date interval by
using an ellipsis interval syntax similar to that of NumericQuery. using an ellipsis interval syntax similar to that of NumericQuery.
""" """
def __init__(self, field, pattern, fast=True): def __init__(self, field, pattern, fast=True):
super(DateQuery, self).__init__(field, pattern, fast) super(DateQuery, self).__init__(field, pattern, fast)
start, end = _parse_periods(pattern) start, end = _parse_periods(pattern)
self.interval = DateInterval.from_periods(start, end) self.interval = DateInterval.from_periods(start, end)
def match(self, item): def match(self, item):
if self.field not in item:
return False
timestamp = float(item[self.field]) timestamp = float(item[self.field])
date = datetime.utcfromtimestamp(timestamp) date = datetime.fromtimestamp(timestamp)
return self.interval.contains(date) return self.interval.contains(date)
_clause_tmpl = "{0} {1} ?" _clause_tmpl = "{0} {1} ?"
@ -661,6 +742,7 @@ class DurationQuery(NumericQuery):
Raises InvalidQueryError when the pattern does not represent an int, float Raises InvalidQueryError when the pattern does not represent an int, float
or M:SS time interval. or M:SS time interval.
""" """
def _convert(self, s): def _convert(self, s):
"""Convert a M:SS or numeric string to a float. """Convert a M:SS or numeric string to a float.
@ -675,7 +757,7 @@ class DurationQuery(NumericQuery):
try: try:
return float(s) return float(s)
except ValueError: except ValueError:
raise InvalidQueryArgumentTypeError( raise InvalidQueryArgumentValueError(
s, s,
u"a M:SS string or a float") u"a M:SS string or a float")
@ -783,6 +865,7 @@ class FieldSort(Sort):
"""An abstract sort criterion that orders by a specific field (of """An abstract sort criterion that orders by a specific field (of
any kind). any kind).
""" """
def __init__(self, field, ascending=True, case_insensitive=True): def __init__(self, field, ascending=True, case_insensitive=True):
self.field = field self.field = field
self.ascending = ascending self.ascending = ascending
@ -795,7 +878,7 @@ class FieldSort(Sort):
def key(item): def key(item):
field_val = item.get(self.field, '') field_val = item.get(self.field, '')
if self.case_insensitive and isinstance(field_val, unicode): if self.case_insensitive and isinstance(field_val, six.text_type):
field_val = field_val.lower() field_val = field_val.lower()
return field_val return field_val
@ -820,6 +903,7 @@ class FieldSort(Sort):
class FixedFieldSort(FieldSort): class FixedFieldSort(FieldSort):
"""Sort object to sort on a fixed field. """Sort object to sort on a fixed field.
""" """
def order_clause(self): def order_clause(self):
order = "ASC" if self.ascending else "DESC" order = "ASC" if self.ascending else "DESC"
if self.case_insensitive: if self.case_insensitive:
@ -836,12 +920,14 @@ class SlowFieldSort(FieldSort):
"""A sort criterion by some model field other than a fixed field: """A sort criterion by some model field other than a fixed field:
i.e., a computed or flexible field. i.e., a computed or flexible field.
""" """
def is_slow(self): def is_slow(self):
return True return True
class NullSort(Sort): class NullSort(Sort):
"""No sorting. Leave results unsorted.""" """No sorting. Leave results unsorted."""
def sort(self, items): def sort(self, items):
return items return items

View file

@ -19,6 +19,10 @@ from __future__ import division, absolute_import, print_function
from . import query from . import query
from beets.util import str2bool from beets.util import str2bool
import six
if not six.PY2:
buffer = memoryview # sqlite won't accept memoryview in python 2
# Abstract base. # Abstract base.
@ -37,7 +41,7 @@ class Type(object):
"""The `Query` subclass to be used when querying the field. """The `Query` subclass to be used when querying the field.
""" """
model_type = unicode model_type = six.text_type
"""The Python type that is used to represent the value in the model. """The Python type that is used to represent the value in the model.
The model is guaranteed to return a value of this type if the field The model is guaranteed to return a value of this type if the field
@ -61,9 +65,9 @@ class Type(object):
if value is None: if value is None:
value = u'' value = u''
if isinstance(value, bytes): if isinstance(value, bytes):
value = value.decode('utf8', 'ignore') value = value.decode('utf-8', 'ignore')
return unicode(value) return six.text_type(value)
def parse(self, string): def parse(self, string):
"""Parse a (possibly human-written) string and return the """Parse a (possibly human-written) string and return the
@ -97,12 +101,12 @@ class Type(object):
https://docs.python.org/2/library/sqlite3.html#sqlite-and-python-types https://docs.python.org/2/library/sqlite3.html#sqlite-and-python-types
Flexible fields have the type affinity `TEXT`. This means the Flexible fields have the type affinity `TEXT`. This means the
`sql_value` is either a `buffer` or a `unicode` object` and the `sql_value` is either a `buffer`/`memoryview` or a `unicode` object`
method must handle these in addition. and the method must handle these in addition.
""" """
if isinstance(sql_value, buffer): if isinstance(sql_value, buffer):
sql_value = bytes(sql_value).decode('utf8', 'ignore') sql_value = bytes(sql_value).decode('utf-8', 'ignore')
if isinstance(sql_value, unicode): if isinstance(sql_value, six.text_type):
return self.parse(sql_value) return self.parse(sql_value)
else: else:
return self.normalize(sql_value) return self.normalize(sql_value)
@ -194,7 +198,7 @@ class Boolean(Type):
model_type = bool model_type = bool
def format(self, value): def format(self, value):
return unicode(bool(value)) return six.text_type(bool(value))
def parse(self, string): def parse(self, string):
return str2bool(string) return str2bool(string)

View file

@ -37,14 +37,13 @@ from beets import dbcore
from beets import plugins from beets import plugins
from beets import util from beets import util
from beets import config from beets import config
from beets.util import pipeline, sorted_walk, ancestry from beets.util import pipeline, sorted_walk, ancestry, MoveOperation
from beets.util import syspath, normpath, displayable_path from beets.util import syspath, normpath, displayable_path
from enum import Enum from enum import Enum
from beets import mediafile from beets import mediafile
action = Enum('action', action = Enum('action',
['SKIP', 'ASIS', 'TRACKS', 'MANUAL', 'APPLY', 'MANUAL_ID', ['SKIP', 'ASIS', 'TRACKS', 'APPLY', 'ALBUMS', 'RETAG'])
'ALBUMS', 'RETAG'])
# The RETAG action represents "don't apply any match, but do record # The RETAG action represents "don't apply any match, but do record
# new metadata". It's not reachable via the standard command prompt but # new metadata". It's not reachable via the standard command prompt but
# can be used by plugins. # can be used by plugins.
@ -69,7 +68,7 @@ class ImportAbort(Exception):
def _open_state(): def _open_state():
"""Reads the state file, returning a dictionary.""" """Reads the state file, returning a dictionary."""
try: try:
with open(config['statefile'].as_filename()) as f: with open(config['statefile'].as_filename(), 'rb') as f:
return pickle.load(f) return pickle.load(f)
except Exception as exc: except Exception as exc:
# The `pickle` module can emit all sorts of exceptions during # The `pickle` module can emit all sorts of exceptions during
@ -83,7 +82,7 @@ def _open_state():
def _save_state(state): def _save_state(state):
"""Writes the state dictionary out to disk.""" """Writes the state dictionary out to disk."""
try: try:
with open(config['statefile'].as_filename(), 'w') as f: with open(config['statefile'].as_filename(), 'wb') as f:
pickle.dump(state, f) pickle.dump(state, f)
except IOError as exc: except IOError as exc:
log.error(u'state file could not be written: {0}', exc) log.error(u'state file could not be written: {0}', exc)
@ -189,6 +188,8 @@ class ImportSession(object):
self.paths = paths self.paths = paths
self.query = query self.query = query
self._is_resuming = dict() self._is_resuming = dict()
self._merged_items = set()
self._merged_dirs = set()
# Normalize the paths. # Normalize the paths.
if self.paths: if self.paths:
@ -221,13 +222,19 @@ class ImportSession(object):
iconfig['resume'] = False iconfig['resume'] = False
iconfig['incremental'] = False iconfig['incremental'] = False
# Copy, move, and link are mutually exclusive. # Copy, move, link, and hardlink are mutually exclusive.
if iconfig['move']: if iconfig['move']:
iconfig['copy'] = False iconfig['copy'] = False
iconfig['link'] = False iconfig['link'] = False
iconfig['hardlink'] = False
elif iconfig['link']: elif iconfig['link']:
iconfig['copy'] = False iconfig['copy'] = False
iconfig['move'] = False iconfig['move'] = False
iconfig['hardlink'] = False
elif iconfig['hardlink']:
iconfig['copy'] = False
iconfig['move'] = False
iconfig['link'] = False
# Only delete when copying. # Only delete when copying.
if not iconfig['copy']: if not iconfig['copy']:
@ -306,6 +313,8 @@ class ImportSession(object):
stages += [import_asis(self)] stages += [import_asis(self)]
# Plugin stages. # Plugin stages.
for stage_func in plugins.early_import_stages():
stages.append(plugin_stage(self, stage_func))
for stage_func in plugins.import_stages(): for stage_func in plugins.import_stages():
stages.append(plugin_stage(self, stage_func)) stages.append(plugin_stage(self, stage_func))
@ -331,7 +340,7 @@ class ImportSession(object):
been imported in a previous session. been imported in a previous session.
""" """
if self.is_resuming(toppath) \ if self.is_resuming(toppath) \
and all(map(lambda p: progress_element(toppath, p), paths)): and all([progress_element(toppath, p) for p in paths]):
return True return True
if self.config['incremental'] \ if self.config['incremental'] \
and tuple(paths) in self.history_dirs: and tuple(paths) in self.history_dirs:
@ -345,6 +354,24 @@ class ImportSession(object):
self._history_dirs = history_get() self._history_dirs = history_get()
return self._history_dirs return self._history_dirs
def already_merged(self, paths):
"""Returns true if all the paths being imported were part of a merge
during previous tasks.
"""
for path in paths:
if path not in self._merged_items \
and path not in self._merged_dirs:
return False
return True
def mark_merged(self, paths):
"""Mark paths and directories as merged for future reimport tasks.
"""
self._merged_items.update(paths)
dirs = set([os.path.dirname(path) if os.path.isfile(path) else path
for path in paths])
self._merged_dirs.update(dirs)
def is_resuming(self, toppath): def is_resuming(self, toppath):
"""Return `True` if user wants to resume import of this path. """Return `True` if user wants to resume import of this path.
@ -362,7 +389,7 @@ class ImportSession(object):
# Either accept immediately or prompt for input to decide. # Either accept immediately or prompt for input to decide.
if self.want_resume is True or \ if self.want_resume is True or \
self.should_resume(toppath): self.should_resume(toppath):
log.warn(u'Resuming interrupted import of {0}', log.warning(u'Resuming interrupted import of {0}',
util.displayable_path(toppath)) util.displayable_path(toppath))
self._is_resuming[toppath] = True self._is_resuming[toppath] = True
else: else:
@ -424,6 +451,9 @@ class ImportTask(BaseImportTask):
* `manipulate_files()` Copy, move, and write files depending on the * `manipulate_files()` Copy, move, and write files depending on the
session configuration. session configuration.
* `set_fields()` Sets the fields given at CLI or configuration to
the specified values.
* `finalize()` Update the import progress and cleanup the file * `finalize()` Update the import progress and cleanup the file
system. system.
""" """
@ -435,6 +465,7 @@ class ImportTask(BaseImportTask):
self.candidates = [] self.candidates = []
self.rec = None self.rec = None
self.should_remove_duplicates = False self.should_remove_duplicates = False
self.should_merge_duplicates = False
self.is_album = True self.is_album = True
self.search_ids = [] # user-supplied candidate IDs. self.search_ids = [] # user-supplied candidate IDs.
@ -443,7 +474,6 @@ class ImportTask(BaseImportTask):
indicates that an action has been selected for this task. indicates that an action has been selected for this task.
""" """
# Not part of the task structure: # Not part of the task structure:
assert choice not in (action.MANUAL, action.MANUAL_ID)
assert choice != action.APPLY # Only used internally. assert choice != action.APPLY # Only used internally.
if choice in (action.SKIP, action.ASIS, action.TRACKS, action.ALBUMS, if choice in (action.SKIP, action.ASIS, action.TRACKS, action.ALBUMS,
action.RETAG): action.RETAG):
@ -499,13 +529,17 @@ class ImportTask(BaseImportTask):
if self.choice_flag in (action.ASIS, action.RETAG): if self.choice_flag in (action.ASIS, action.RETAG):
return list(self.items) return list(self.items)
elif self.choice_flag == action.APPLY: elif self.choice_flag == action.APPLY:
return self.match.mapping.keys() return list(self.match.mapping.keys())
else: else:
assert False assert False
def apply_metadata(self): def apply_metadata(self):
"""Copy metadata from match info to the items. """Copy metadata from match info to the items.
""" """
if config['import']['from_scratch']:
for item in self.match.mapping:
item.clear()
autotag.apply_metadata(self.match.info, self.match.mapping) autotag.apply_metadata(self.match.info, self.match.mapping)
def duplicate_items(self, lib): def duplicate_items(self, lib):
@ -526,13 +560,29 @@ class ImportTask(BaseImportTask):
util.prune_dirs(os.path.dirname(item.path), util.prune_dirs(os.path.dirname(item.path),
lib.directory) lib.directory)
def set_fields(self):
"""Sets the fields given at CLI or configuration to the specified
values.
"""
for field, view in config['import']['set_fields'].items():
value = view.get()
log.debug(u'Set field {1}={2} for {0}',
displayable_path(self.paths),
field,
value)
self.album[field] = value
self.album.store()
def finalize(self, session): def finalize(self, session):
"""Save progress, clean up files, and emit plugin event. """Save progress, clean up files, and emit plugin event.
""" """
# Update progress. # Update progress.
if session.want_resume: if session.want_resume:
self.save_progress() self.save_progress()
if session.config['incremental']: if session.config['incremental'] and not (
# Should we skip recording to incremental list?
self.skip and session.config['incremental_skip_later']
):
self.save_history() self.save_history()
self.cleanup(copy=session.config['copy'], self.cleanup(copy=session.config['copy'],
@ -587,12 +637,12 @@ class ImportTask(BaseImportTask):
candidate IDs are stored in self.search_ids: if present, the candidate IDs are stored in self.search_ids: if present, the
initial lookup is restricted to only those IDs. initial lookup is restricted to only those IDs.
""" """
artist, album, candidates, recommendation = \ artist, album, prop = \
autotag.tag_album(self.items, search_ids=self.search_ids) autotag.tag_album(self.items, search_ids=self.search_ids)
self.cur_artist = artist self.cur_artist = artist
self.cur_album = album self.cur_album = album
self.candidates = candidates self.candidates = prop.candidates
self.rec = recommendation self.rec = prop.recommendation
def find_duplicates(self, lib): def find_duplicates(self, lib):
"""Return a list of albums from `lib` with the same artist and """Return a list of albums from `lib` with the same artist and
@ -612,10 +662,11 @@ class ImportTask(BaseImportTask):
)) ))
for album in lib.albums(duplicate_query): for album in lib.albums(duplicate_query):
# Check whether the album is identical in contents, in which # Check whether the album paths are all present in the task
# case it is not a duplicate (will be replaced). # i.e. album is being completely re-imported by the task,
# in which case it is not a duplicate (will be replaced).
album_paths = set(i.path for i in album.items()) album_paths = set(i.path for i in album.items())
if album_paths != task_paths: if not (album_paths <= task_paths):
duplicates.append(album) duplicates.append(album)
return duplicates return duplicates
@ -640,7 +691,7 @@ class ImportTask(BaseImportTask):
changes['comp'] = False changes['comp'] = False
else: else:
# VA. # VA.
changes['albumartist'] = config['va_name'].get(unicode) changes['albumartist'] = config['va_name'].as_str()
changes['comp'] = True changes['comp'] = True
elif self.choice_flag in (action.APPLY, action.RETAG): elif self.choice_flag in (action.APPLY, action.RETAG):
@ -655,20 +706,28 @@ class ImportTask(BaseImportTask):
for item in self.items: for item in self.items:
item.update(changes) item.update(changes)
def manipulate_files(self, move=False, copy=False, write=False, def manipulate_files(self, operation=None, write=False, session=None):
link=False, session=None): """ Copy, move, link or hardlink (depending on `operation`) the files
as well as write metadata.
`operation` should be an instance of `util.MoveOperation`.
If `write` is `True` metadata is written to the files.
"""
items = self.imported_items() items = self.imported_items()
# Save the original paths of all items for deletion and pruning # Save the original paths of all items for deletion and pruning
# in the next step (finalization). # in the next step (finalization).
self.old_paths = [item.path for item in items] self.old_paths = [item.path for item in items]
for item in items: for item in items:
if move or copy or link: if operation is not None:
# In copy and link modes, treat re-imports specially: # In copy and link modes, treat re-imports specially:
# move in-library files. (Out-of-library files are # move in-library files. (Out-of-library files are
# copied/moved as usual). # copied/moved as usual).
old_path = item.path old_path = item.path
if (copy or link) and self.replaced_items[item] and \ if (operation != MoveOperation.MOVE
session.lib.directory in util.ancestry(old_path): and self.replaced_items[item]
and session.lib.directory in util.ancestry(old_path)):
item.move() item.move()
# We moved the item, so remove the # We moved the item, so remove the
# now-nonexistent file from old_paths. # now-nonexistent file from old_paths.
@ -676,7 +735,7 @@ class ImportTask(BaseImportTask):
else: else:
# A normal import. Just copy files and keep track of # A normal import. Just copy files and keep track of
# old paths. # old paths.
item.move(copy, link) item.move(operation)
if write and (self.apply or self.choice_flag == action.RETAG): if write and (self.apply or self.choice_flag == action.RETAG):
item.try_write() item.try_write()
@ -830,10 +889,9 @@ class SingletonImportTask(ImportTask):
plugins.send('item_imported', lib=lib, item=item) plugins.send('item_imported', lib=lib, item=item)
def lookup_candidates(self): def lookup_candidates(self):
candidates, recommendation = autotag.tag_item( prop = autotag.tag_item(self.item, search_ids=self.search_ids)
self.item, search_ids=self.search_ids) self.candidates = prop.candidates
self.candidates = candidates self.rec = prop.recommendation
self.rec = recommendation
def find_duplicates(self, lib): def find_duplicates(self, lib):
"""Return a list of items from `lib` that have the same artist """Return a list of items from `lib` that have the same artist
@ -874,6 +932,19 @@ class SingletonImportTask(ImportTask):
def reload(self): def reload(self):
self.item.load() self.item.load()
def set_fields(self):
"""Sets the fields given at CLI or configuration to the specified
values.
"""
for field, view in config['import']['set_fields'].items():
value = view.get()
log.debug(u'Set field {1}={2} for {0}',
displayable_path(self.paths),
field,
value)
self.item[field] = value
self.item.store()
# FIXME The inheritance relationships are inverted. This is why there # FIXME The inheritance relationships are inverted. This is why there
# are so many methods which pass. More responsibility should be delegated to # are so many methods which pass. More responsibility should be delegated to
@ -944,7 +1015,7 @@ class ArchiveImportTask(SentinelImportTask):
return False return False
for path_test, _ in cls.handlers(): for path_test, _ in cls.handlers():
if path_test(path): if path_test(util.py3_path(path)):
return True return True
return False return False
@ -985,12 +1056,12 @@ class ArchiveImportTask(SentinelImportTask):
`toppath` to that directory. `toppath` to that directory.
""" """
for path_test, handler_class in self.handlers(): for path_test, handler_class in self.handlers():
if path_test(self.toppath): if path_test(util.py3_path(self.toppath)):
break break
try: try:
extract_to = mkdtemp() extract_to = mkdtemp()
archive = handler_class(self.toppath, mode='r') archive = handler_class(util.py3_path(self.toppath), mode='r')
archive.extractall(extract_to) archive.extractall(extract_to)
finally: finally:
archive.close() archive.close()
@ -1148,7 +1219,7 @@ class ImportTaskFactory(object):
if not (self.session.config['move'] or if not (self.session.config['move'] or
self.session.config['copy']): self.session.config['copy']):
log.warn(u"Archive importing requires either " log.warning(u"Archive importing requires either "
u"'copy' or 'move' to be enabled.") u"'copy' or 'move' to be enabled.")
return return
@ -1179,12 +1250,33 @@ class ImportTaskFactory(object):
# Silently ignore non-music files. # Silently ignore non-music files.
pass pass
elif isinstance(exc.reason, mediafile.UnreadableFileError): elif isinstance(exc.reason, mediafile.UnreadableFileError):
log.warn(u'unreadable file: {0}', displayable_path(path)) log.warning(u'unreadable file: {0}', displayable_path(path))
else: else:
log.error(u'error reading {0}: {1}', log.error(u'error reading {0}: {1}',
displayable_path(path), exc) displayable_path(path), exc)
# Pipeline utilities
def _freshen_items(items):
# Clear IDs from re-tagged items so they appear "fresh" when
# we add them back to the library.
for item in items:
item.id = None
item.album_id = None
def _extend_pipeline(tasks, *stages):
# Return pipeline extension for stages with list of tasks
if type(tasks) == list:
task_iter = iter(tasks)
else:
task_iter = tasks
ipl = pipeline.Pipeline([task_iter] + list(stages))
return pipeline.multiple(ipl.pull())
# Full-album pipeline stages. # Full-album pipeline stages.
def read_tasks(session): def read_tasks(session):
@ -1204,7 +1296,7 @@ def read_tasks(session):
skipped += task_factory.skipped skipped += task_factory.skipped
if not task_factory.imported: if not task_factory.imported:
log.warn(u'No files imported from {0}', log.warning(u'No files imported from {0}',
displayable_path(toppath)) displayable_path(toppath))
# Show skipped directories (due to incremental/resume). # Show skipped directories (due to incremental/resume).
@ -1230,12 +1322,7 @@ def query_tasks(session):
log.debug(u'yielding album {0}: {1} - {2}', log.debug(u'yielding album {0}: {1} - {2}',
album.id, album.albumartist, album.album) album.id, album.albumartist, album.album)
items = list(album.items()) items = list(album.items())
_freshen_items(items)
# Clear IDs from re-tagged items so they appear "fresh" when
# we add them back to the library.
for item in items:
item.id = None
item.album_id = None
task = ImportTask(None, [album.item_dir()], items) task = ImportTask(None, [album.item_dir()], items)
for task in task.handle_created(session): for task in task.handle_created(session):
@ -1281,6 +1368,9 @@ def user_query(session, task):
if task.skip: if task.skip:
return task return task
if session.already_merged(task.paths):
return pipeline.BUBBLE
# Ask the user for a choice. # Ask the user for a choice.
task.choose_match(session) task.choose_match(session)
plugins.send('import_task_choice', session=session, task=task) plugins.send('import_task_choice', session=session, task=task)
@ -1295,24 +1385,38 @@ def user_query(session, task):
yield new_task yield new_task
yield SentinelImportTask(task.toppath, task.paths) yield SentinelImportTask(task.toppath, task.paths)
ipl = pipeline.Pipeline([ return _extend_pipeline(emitter(task),
emitter(task),
lookup_candidates(session), lookup_candidates(session),
user_query(session), user_query(session))
])
return pipeline.multiple(ipl.pull())
# As albums: group items by albums and create task for each album # As albums: group items by albums and create task for each album
if task.choice_flag is action.ALBUMS: if task.choice_flag is action.ALBUMS:
ipl = pipeline.Pipeline([ return _extend_pipeline([task],
iter([task]),
group_albums(session), group_albums(session),
lookup_candidates(session), lookup_candidates(session),
user_query(session) user_query(session))
])
return pipeline.multiple(ipl.pull())
resolve_duplicates(session, task) resolve_duplicates(session, task)
if task.should_merge_duplicates:
# Create a new task for tagging the current items
# and duplicates together
duplicate_items = task.duplicate_items(session.lib)
# Duplicates would be reimported so make them look "fresh"
_freshen_items(duplicate_items)
duplicate_paths = [item.path for item in duplicate_items]
# Record merged paths in the session so they are not reimported
session.mark_merged(duplicate_paths)
merged_task = ImportTask(None, task.paths + duplicate_paths,
task.items + duplicate_items)
return _extend_pipeline([merged_task],
lookup_candidates(session),
user_query(session))
apply_choice(session, task) apply_choice(session, task)
return task return task
@ -1327,7 +1431,33 @@ def resolve_duplicates(session, task):
log.debug(u'found duplicates: {}'.format( log.debug(u'found duplicates: {}'.format(
[o.id for o in found_duplicates] [o.id for o in found_duplicates]
)) ))
# Get the default action to follow from config.
duplicate_action = config['import']['duplicate_action'].as_choice({
u'skip': u's',
u'keep': u'k',
u'remove': u'r',
u'merge': u'm',
u'ask': u'a',
})
log.debug(u'default action for duplicates: {0}', duplicate_action)
if duplicate_action == u's':
# Skip new.
task.set_choice(action.SKIP)
elif duplicate_action == u'k':
# Keep both. Do nothing; leave the choice intact.
pass
elif duplicate_action == u'r':
# Remove old.
task.should_remove_duplicates = True
elif duplicate_action == u'm':
# Merge duplicates together
task.should_merge_duplicates = True
else:
# No default action set; ask the session.
session.resolve_duplicate(task, found_duplicates) session.resolve_duplicate(task, found_duplicates)
session.log_choice(task, True) session.log_choice(task, True)
@ -1360,6 +1490,14 @@ def apply_choice(session, task):
task.add(session.lib) task.add(session.lib)
# If ``set_fields`` is set, set those fields to the
# configured values.
# NOTE: This cannot be done before the ``task.add()`` call above,
# because then the ``ImportTask`` won't have an `album` for which
# it can set the fields.
if config['import']['set_fields']:
task.set_fields()
@pipeline.mutator_stage @pipeline.mutator_stage
def plugin_stage(session, func, task): def plugin_stage(session, func, task):
@ -1388,11 +1526,20 @@ def manipulate_files(session, task):
if task.should_remove_duplicates: if task.should_remove_duplicates:
task.remove_duplicates(session.lib) task.remove_duplicates(session.lib)
if session.config['move']:
operation = MoveOperation.MOVE
elif session.config['copy']:
operation = MoveOperation.COPY
elif session.config['link']:
operation = MoveOperation.LINK
elif session.config['hardlink']:
operation = MoveOperation.HARDLINK
else:
operation = None
task.manipulate_files( task.manipulate_files(
move=session.config['move'], operation,
copy=session.config['copy'],
write=session.config['write'], write=session.config['write'],
link=session.config['link'],
session=session, session=session,
) )
@ -1439,8 +1586,16 @@ def group_albums(session):
task = pipeline.multiple(tasks) task = pipeline.multiple(tasks)
MULTIDISC_MARKERS = (r'dis[ck]', r'cd') MULTIDISC_MARKERS = (br'dis[ck]', br'cd')
MULTIDISC_PAT_FMT = r'^(.*%s[\W_]*)\d' MULTIDISC_PAT_FMT = br'^(.*%s[\W_]*)\d'
def is_subdir_of_any_in_list(path, dirs):
"""Returns True if path os a subdirectory of any directory in dirs
(a list). In other case, returns False.
"""
ancestors = ancestry(path)
return any(d in ancestors for d in dirs)
def albums_in_dir(path): def albums_in_dir(path):
@ -1462,7 +1617,7 @@ def albums_in_dir(path):
# and add the current directory. If so, just add the directory # and add the current directory. If so, just add the directory
# and move on to the next directory. If not, stop collapsing. # and move on to the next directory. If not, stop collapsing.
if collapse_paths: if collapse_paths:
if (not collapse_pat and collapse_paths[0] in ancestry(root)) or \ if (is_subdir_of_any_in_list(root, collapse_paths)) or \
(collapse_pat and (collapse_pat and
collapse_pat.match(os.path.basename(root))): collapse_pat.match(os.path.basename(root))):
# Still collapsing. # Still collapsing.
@ -1483,7 +1638,9 @@ def albums_in_dir(path):
# named in this way. # named in this way.
start_collapsing = False start_collapsing = False
for marker in MULTIDISC_MARKERS: for marker in MULTIDISC_MARKERS:
marker_pat = re.compile(MULTIDISC_PAT_FMT % marker, re.I) # We're using replace on %s due to lack of .format() on bytestrings
p = MULTIDISC_PAT_FMT.replace(b'%s', marker)
marker_pat = re.compile(p, re.I)
match = marker_pat.match(os.path.basename(root)) match = marker_pat.match(os.path.basename(root))
# Is this directory the root of a nested multi-disc album? # Is this directory the root of a nested multi-disc album?
@ -1492,13 +1649,16 @@ def albums_in_dir(path):
start_collapsing = True start_collapsing = True
subdir_pat = None subdir_pat = None
for subdir in dirs: for subdir in dirs:
subdir = util.bytestring_path(subdir)
# The first directory dictates the pattern for # The first directory dictates the pattern for
# the remaining directories. # the remaining directories.
if not subdir_pat: if not subdir_pat:
match = marker_pat.match(subdir) match = marker_pat.match(subdir)
if match: if match:
match_group = re.escape(match.group(1))
subdir_pat = re.compile( subdir_pat = re.compile(
br'^%s\d' % re.escape(match.group(1)), re.I b''.join([b'^', match_group, br'\d']),
re.I
) )
else: else:
start_collapsing = False start_collapsing = False
@ -1520,7 +1680,8 @@ def albums_in_dir(path):
# Set the current pattern to match directories with the same # Set the current pattern to match directories with the same
# prefix as this one, followed by a digit. # prefix as this one, followed by a digit.
collapse_pat = re.compile( collapse_pat = re.compile(
br'^%s\d' % re.escape(match.group(1)), re.I b''.join([b'^', re.escape(match.group(1)), br'\d']),
re.I
) )
break break

View file

@ -22,18 +22,27 @@ import sys
import unicodedata import unicodedata
import time import time
import re import re
from unidecode import unidecode import six
from beets import logging from beets import logging
from beets.mediafile import MediaFile, MutagenError, UnreadableFileError from beets.mediafile import MediaFile, UnreadableFileError
from beets import plugins from beets import plugins
from beets import util from beets import util
from beets.util import bytestring_path, syspath, normpath, samefile from beets.util import bytestring_path, syspath, normpath, samefile, \
MoveOperation
from beets.util.functemplate import Template from beets.util.functemplate import Template
from beets import dbcore from beets import dbcore
from beets.dbcore import types from beets.dbcore import types
import beets import beets
# To use the SQLite "blob" type, it doesn't suffice to provide a byte
# string; SQLite treats that as encoded text. Wrapping it in a `buffer` or a
# `memoryview`, depending on the Python version, tells it that we
# actually mean non-text data.
if six.PY2:
BLOB_TYPE = buffer # noqa: F821
else:
BLOB_TYPE = memoryview
log = logging.getLogger('beets') log = logging.getLogger('beets')
@ -48,9 +57,6 @@ class PathQuery(dbcore.FieldQuery):
and case-sensitive otherwise. and case-sensitive otherwise.
""" """
escape_re = re.compile(r'[\\_%]')
escape_char = b'\\'
def __init__(self, field, pattern, fast=True, case_sensitive=None): def __init__(self, field, pattern, fast=True, case_sensitive=None):
"""Create a path query. `pattern` must be a path, either to a """Create a path query. `pattern` must be a path, either to a
file or a directory. file or a directory.
@ -85,28 +91,31 @@ class PathQuery(dbcore.FieldQuery):
colon = query_part.find(':') colon = query_part.find(':')
if colon != -1: if colon != -1:
query_part = query_part[:colon] query_part = query_part[:colon]
return (os.sep in query_part and
os.path.exists(syspath(normpath(query_part)))) # Test both `sep` and `altsep` (i.e., both slash and backslash on
# Windows).
return (
(os.sep in query_part or
(os.altsep and os.altsep in query_part)) and
os.path.exists(syspath(normpath(query_part)))
)
def match(self, item): def match(self, item):
path = item.path if self.case_sensitive else item.path.lower() path = item.path if self.case_sensitive else item.path.lower()
return (path == self.file_path) or path.startswith(self.dir_path) return (path == self.file_path) or path.startswith(self.dir_path)
def col_clause(self): def col_clause(self):
if self.case_sensitive: file_blob = BLOB_TYPE(self.file_path)
file_blob = buffer(self.file_path) dir_blob = BLOB_TYPE(self.dir_path)
dir_blob = buffer(self.dir_path)
return '({0} = ?) || (substr({0}, 1, ?) = ?)'.format(self.field), \
(file_blob, len(dir_blob), dir_blob)
escape = lambda m: self.escape_char + m.group(0) if self.case_sensitive:
dir_pattern = self.escape_re.sub(escape, self.dir_path) query_part = '({0} = ?) || (substr({0}, 1, ?) = ?)'
dir_blob = buffer(dir_pattern + b'%') else:
file_pattern = self.escape_re.sub(escape, self.file_path) query_part = '(BYTELOWER({0}) = BYTELOWER(?)) || \
file_blob = buffer(file_pattern) (substr(BYTELOWER({0}), 1, ?) = BYTELOWER(?))'
return '({0} LIKE ? ESCAPE ?) || ({0} LIKE ? ESCAPE ?)'.format(
self.field), (file_blob, self.escape_char, dir_blob, return query_part.format(self.field), \
self.escape_char) (file_blob, len(dir_blob), dir_blob)
# Library-specific field types. # Library-specific field types.
@ -117,14 +126,15 @@ class DateType(types.Float):
query = dbcore.query.DateQuery query = dbcore.query.DateQuery
def format(self, value): def format(self, value):
return time.strftime(beets.config['time_format'].get(unicode), return time.strftime(beets.config['time_format'].as_str(),
time.localtime(value or 0)) time.localtime(value or 0))
def parse(self, string): def parse(self, string):
try: try:
# Try a formatted date string. # Try a formatted date string.
return time.mktime( return time.mktime(
time.strptime(string, beets.config['time_format'].get(unicode)) time.strptime(string,
beets.config['time_format'].as_str())
) )
except ValueError: except ValueError:
# Fall back to a plain timestamp number. # Fall back to a plain timestamp number.
@ -135,10 +145,27 @@ class DateType(types.Float):
class PathType(types.Type): class PathType(types.Type):
"""A dbcore type for filesystem paths. These are represented as
`bytes` objects, in keeping with the Unix filesystem abstraction.
"""
sql = u'BLOB' sql = u'BLOB'
query = PathQuery query = PathQuery
model_type = bytes model_type = bytes
def __init__(self, nullable=False):
"""Create a path type object. `nullable` controls whether the
type may be missing, i.e., None.
"""
self.nullable = nullable
@property
def null(self):
if self.nullable:
return None
else:
return b''
def format(self, value): def format(self, value):
return util.displayable_path(value) return util.displayable_path(value)
@ -146,12 +173,11 @@ class PathType(types.Type):
return normpath(bytestring_path(string)) return normpath(bytestring_path(string))
def normalize(self, value): def normalize(self, value):
if isinstance(value, unicode): if isinstance(value, six.text_type):
# Paths stored internally as encoded bytes. # Paths stored internally as encoded bytes.
return bytestring_path(value) return bytestring_path(value)
elif isinstance(value, buffer): elif isinstance(value, BLOB_TYPE):
# SQLite must store bytestings as buffers to avoid decoding.
# We unwrap buffers to bytes. # We unwrap buffers to bytes.
return bytes(value) return bytes(value)
@ -163,7 +189,7 @@ class PathType(types.Type):
def to_sql(self, value): def to_sql(self, value):
if isinstance(value, bytes): if isinstance(value, bytes):
value = buffer(value) value = BLOB_TYPE(value)
return value return value
@ -180,6 +206,8 @@ class MusicalKey(types.String):
r'bb': 'a#', r'bb': 'a#',
} }
null = None
def parse(self, key): def parse(self, key):
key = key.lower() key = key.lower()
for flat, sharp in self.ENHARMONIC.items(): for flat, sharp in self.ENHARMONIC.items():
@ -254,7 +282,7 @@ PF_KEY_DEFAULT = 'default'
# Exceptions. # Exceptions.
@six.python_2_unicode_compatible
class FileOperationError(Exception): class FileOperationError(Exception):
"""Indicates an error when interacting with a file on disk. """Indicates an error when interacting with a file on disk.
Possibilities include an unsupported media type, a permissions Possibilities include an unsupported media type, a permissions
@ -268,35 +296,39 @@ class FileOperationError(Exception):
self.path = path self.path = path
self.reason = reason self.reason = reason
def __unicode__(self): def text(self):
"""Get a string representing the error. Describes both the """Get a string representing the error. Describes both the
underlying reason and the file path in question. underlying reason and the file path in question.
""" """
return u'{0}: {1}'.format( return u'{0}: {1}'.format(
util.displayable_path(self.path), util.displayable_path(self.path),
unicode(self.reason) six.text_type(self.reason)
) )
def __str__(self): # define __str__ as text to avoid infinite loop on super() calls
return unicode(self).encode('utf8') # with @six.python_2_unicode_compatible
__str__ = text
@six.python_2_unicode_compatible
class ReadError(FileOperationError): class ReadError(FileOperationError):
"""An error while reading a file (i.e. in `Item.read`). """An error while reading a file (i.e. in `Item.read`).
""" """
def __unicode__(self): def __str__(self):
return u'error reading ' + super(ReadError, self).__unicode__() return u'error reading ' + super(ReadError, self).text()
@six.python_2_unicode_compatible
class WriteError(FileOperationError): class WriteError(FileOperationError):
"""An error while writing a file (i.e. in `Item.write`). """An error while writing a file (i.e. in `Item.write`).
""" """
def __unicode__(self): def __str__(self):
return u'error writing ' + super(WriteError, self).__unicode__() return u'error writing ' + super(WriteError, self).text()
# Item and Album model classes. # Item and Album model classes.
@six.python_2_unicode_compatible
class LibModel(dbcore.Model): class LibModel(dbcore.Model):
"""Shared concrete functionality for Items and Albums. """Shared concrete functionality for Items and Albums.
""" """
@ -310,8 +342,8 @@ class LibModel(dbcore.Model):
funcs.update(plugins.template_funcs()) funcs.update(plugins.template_funcs())
return funcs return funcs
def store(self): def store(self, fields=None):
super(LibModel, self).store() super(LibModel, self).store(fields)
plugins.send('database_change', lib=self._db, model=self) plugins.send('database_change', lib=self._db, model=self)
def remove(self): def remove(self):
@ -324,20 +356,16 @@ class LibModel(dbcore.Model):
def __format__(self, spec): def __format__(self, spec):
if not spec: if not spec:
spec = beets.config[self._format_config_key].get(unicode) spec = beets.config[self._format_config_key].as_str()
result = self.evaluate_template(spec) assert isinstance(spec, six.text_type)
if isinstance(spec, bytes): return self.evaluate_template(spec)
# if spec is a byte string then we must return a one as well
return result.encode('utf8')
else:
return result
def __str__(self): def __str__(self):
return format(self).encode('utf8')
def __unicode__(self):
return format(self) return format(self)
def __bytes__(self):
return self.__str__().encode('utf-8')
class FormattedItemMapping(dbcore.db.FormattedMapping): class FormattedItemMapping(dbcore.db.FormattedMapping):
"""Add lookup for album-level fields. """Add lookup for album-level fields.
@ -407,7 +435,10 @@ class Item(LibModel):
'albumartist_sort': types.STRING, 'albumartist_sort': types.STRING,
'albumartist_credit': types.STRING, 'albumartist_credit': types.STRING,
'genre': types.STRING, 'genre': types.STRING,
'lyricist': types.STRING,
'composer': types.STRING, 'composer': types.STRING,
'composer_sort': types.STRING,
'arranger': types.STRING,
'grouping': types.STRING, 'grouping': types.STRING,
'year': types.PaddedInt(4), 'year': types.PaddedInt(4),
'month': types.PaddedInt(2), 'month': types.PaddedInt(2),
@ -424,6 +455,7 @@ class Item(LibModel):
'mb_albumid': types.STRING, 'mb_albumid': types.STRING,
'mb_artistid': types.STRING, 'mb_artistid': types.STRING,
'mb_albumartistid': types.STRING, 'mb_albumartistid': types.STRING,
'mb_releasetrackid': types.STRING,
'albumtype': types.STRING, 'albumtype': types.STRING,
'label': types.STRING, 'label': types.STRING,
'acoustid_fingerprint': types.STRING, 'acoustid_fingerprint': types.STRING,
@ -443,6 +475,8 @@ class Item(LibModel):
'rg_track_peak': types.NULL_FLOAT, 'rg_track_peak': types.NULL_FLOAT,
'rg_album_gain': types.NULL_FLOAT, 'rg_album_gain': types.NULL_FLOAT,
'rg_album_peak': types.NULL_FLOAT, 'rg_album_peak': types.NULL_FLOAT,
'r128_track_gain': types.PaddedInt(6),
'r128_album_gain': types.PaddedInt(6),
'original_year': types.PaddedInt(4), 'original_year': types.PaddedInt(4),
'original_month': types.PaddedInt(2), 'original_month': types.PaddedInt(2),
'original_day': types.PaddedInt(2), 'original_day': types.PaddedInt(2),
@ -510,15 +544,15 @@ class Item(LibModel):
""" """
# Encode unicode paths and read buffers. # Encode unicode paths and read buffers.
if key == 'path': if key == 'path':
if isinstance(value, unicode): if isinstance(value, six.text_type):
value = bytestring_path(value) value = bytestring_path(value)
elif isinstance(value, buffer): elif isinstance(value, BLOB_TYPE):
value = bytes(value) value = bytes(value)
if key in MediaFile.fields(): changed = super(Item, self)._setitem(key, value)
self.mtime = 0 # Reset mtime on dirty.
super(Item, self).__setitem__(key, value) if changed and key in MediaFile.fields():
self.mtime = 0 # Reset mtime on dirty.
def update(self, values): def update(self, values):
"""Set all key/value pairs in the mapping. If mtime is """Set all key/value pairs in the mapping. If mtime is
@ -528,6 +562,11 @@ class Item(LibModel):
if self.mtime == 0 and 'mtime' in values: if self.mtime == 0 and 'mtime' in values:
self.mtime = values['mtime'] self.mtime = values['mtime']
def clear(self):
"""Set all key/value pairs to None."""
for key in self._media_fields:
setattr(self, key, None)
def get_album(self): def get_album(self):
"""Get the Album object that this item belongs to, if any, or """Get the Album object that this item belongs to, if any, or
None if the item is a singleton or is not associated with a None if the item is a singleton or is not associated with a
@ -554,12 +593,12 @@ class Item(LibModel):
read_path = normpath(read_path) read_path = normpath(read_path)
try: try:
mediafile = MediaFile(syspath(read_path)) mediafile = MediaFile(syspath(read_path))
except (OSError, IOError, UnreadableFileError) as exc: except UnreadableFileError as exc:
raise ReadError(read_path, exc) raise ReadError(read_path, exc)
for key in self._media_fields: for key in self._media_fields:
value = getattr(mediafile, key) value = getattr(mediafile, key)
if isinstance(value, (int, long)): if isinstance(value, six.integer_types):
if value.bit_length() > 63: if value.bit_length() > 63:
value = 0 value = 0
self[key] = value self[key] = value
@ -601,14 +640,14 @@ class Item(LibModel):
try: try:
mediafile = MediaFile(syspath(path), mediafile = MediaFile(syspath(path),
id3v23=beets.config['id3v23'].get(bool)) id3v23=beets.config['id3v23'].get(bool))
except (OSError, IOError, UnreadableFileError) as exc: except UnreadableFileError as exc:
raise ReadError(self.path, exc) raise ReadError(path, exc)
# Write the tags to the file. # Write the tags to the file.
mediafile.update(item_tags) mediafile.update(item_tags)
try: try:
mediafile.save() mediafile.save()
except (OSError, IOError, MutagenError) as exc: except UnreadableFileError as exc:
raise WriteError(self.path, exc) raise WriteError(self.path, exc)
# The file has a new mtime. # The file has a new mtime.
@ -653,27 +692,34 @@ class Item(LibModel):
# Files themselves. # Files themselves.
def move_file(self, dest, copy=False, link=False): def move_file(self, dest, operation=MoveOperation.MOVE):
"""Moves or copies the item's file, updating the path value if """Move, copy, link or hardlink the item's depending on `operation`,
the move succeeds. If a file exists at ``dest``, then it is updating the path value if the move succeeds.
slightly modified to be unique.
If a file exists at `dest`, then it is slightly modified to be unique.
`operation` should be an instance of `util.MoveOperation`.
""" """
if not util.samefile(self.path, dest): if not util.samefile(self.path, dest):
dest = util.unique_path(dest) dest = util.unique_path(dest)
if copy: if operation == MoveOperation.MOVE:
util.copy(self.path, dest)
plugins.send("item_copied", item=self, source=self.path,
destination=dest)
elif link:
util.link(self.path, dest)
plugins.send("item_linked", item=self, source=self.path,
destination=dest)
else:
plugins.send("before_item_moved", item=self, source=self.path, plugins.send("before_item_moved", item=self, source=self.path,
destination=dest) destination=dest)
util.move(self.path, dest) util.move(self.path, dest)
plugins.send("item_moved", item=self, source=self.path, plugins.send("item_moved", item=self, source=self.path,
destination=dest) destination=dest)
elif operation == MoveOperation.COPY:
util.copy(self.path, dest)
plugins.send("item_copied", item=self, source=self.path,
destination=dest)
elif operation == MoveOperation.LINK:
util.link(self.path, dest)
plugins.send("item_linked", item=self, source=self.path,
destination=dest)
elif operation == MoveOperation.HARDLINK:
util.hardlink(self.path, dest)
plugins.send("item_hardlinked", item=self, source=self.path,
destination=dest)
# Either copying or moving succeeded, so update the stored path. # Either copying or moving succeeded, so update the stored path.
self.path = dest self.path = dest
@ -720,26 +766,27 @@ class Item(LibModel):
self._db._memotable = {} self._db._memotable = {}
def move(self, copy=False, link=False, basedir=None, with_album=True): def move(self, operation=MoveOperation.MOVE, basedir=None,
with_album=True, store=True):
"""Move the item to its designated location within the library """Move the item to its designated location within the library
directory (provided by destination()). Subdirectories are directory (provided by destination()). Subdirectories are
created as needed. If the operation succeeds, the item's path created as needed. If the operation succeeds, the item's path
field is updated to reflect the new location. field is updated to reflect the new location.
If `copy` is true, moving the file is copied rather than moved. Instead of moving the item it can also be copied, linked or hardlinked
Similarly, `link` creates a symlink instead. depending on `operation` which should be an instance of
`util.MoveOperation`.
basedir overrides the library base directory for the `basedir` overrides the library base directory for the destination.
destination.
If the item is in an album, the album is given an opportunity to If the item is in an album and `with_album` is `True`, the album is
move its art. (This can be disabled by passing given an opportunity to move its art.
with_album=False.)
The item is stored to the database if it is in the database, so By default, the item is stored to the database if it is in the
any dirty fields prior to the move() call will be written as a database, so any dirty fields prior to the move() call will be written
side effect. You probably want to call save() to commit the DB as a side effect.
transaction. If `store` is `False` however, the item won't be stored and you'll
have to manually store it after invoking this method.
""" """
self._check_db() self._check_db()
dest = self.destination(basedir=basedir) dest = self.destination(basedir=basedir)
@ -749,18 +796,20 @@ class Item(LibModel):
# Perform the move and store the change. # Perform the move and store the change.
old_path = self.path old_path = self.path
self.move_file(dest, copy, link) self.move_file(dest, operation)
if store:
self.store() self.store()
# If this item is in an album, move its art. # If this item is in an album, move its art.
if with_album: if with_album:
album = self.get_album() album = self.get_album()
if album: if album:
album.move_art(copy) album.move_art(operation)
if store:
album.store() album.store()
# Prune vacated directory. # Prune vacated directory.
if not copy: if operation == MoveOperation.MOVE:
util.prune_dirs(os.path.dirname(old_path), self._db.directory) util.prune_dirs(os.path.dirname(old_path), self._db.directory)
# Templating. # Templating.
@ -811,7 +860,10 @@ class Item(LibModel):
subpath = unicodedata.normalize('NFC', subpath) subpath = unicodedata.normalize('NFC', subpath)
if beets.config['asciify_paths']: if beets.config['asciify_paths']:
subpath = unidecode(subpath) subpath = util.asciify_path(
subpath,
beets.config['path_sep_replace'].as_str()
)
maxlen = beets.config['max_filename_length'].get(int) maxlen = beets.config['max_filename_length'].get(int)
if not maxlen: if not maxlen:
@ -833,7 +885,7 @@ class Item(LibModel):
) )
if fragment: if fragment:
return subpath return util.as_string(subpath)
else: else:
return normpath(os.path.join(basedir, subpath)) return normpath(os.path.join(basedir, subpath))
@ -848,7 +900,7 @@ class Album(LibModel):
_always_dirty = True _always_dirty = True
_fields = { _fields = {
'id': types.PRIMARY_ID, 'id': types.PRIMARY_ID,
'artpath': PathType(), 'artpath': PathType(True),
'added': DateType(), 'added': DateType(),
'albumartist': types.STRING, 'albumartist': types.STRING,
@ -875,6 +927,7 @@ class Album(LibModel):
'albumdisambig': types.STRING, 'albumdisambig': types.STRING,
'rg_album_gain': types.NULL_FLOAT, 'rg_album_gain': types.NULL_FLOAT,
'rg_album_peak': types.NULL_FLOAT, 'rg_album_peak': types.NULL_FLOAT,
'r128_album_gain': types.PaddedInt(6),
'original_year': types.PaddedInt(4), 'original_year': types.PaddedInt(4),
'original_month': types.PaddedInt(2), 'original_month': types.PaddedInt(2),
'original_day': types.PaddedInt(2), 'original_day': types.PaddedInt(2),
@ -918,6 +971,7 @@ class Album(LibModel):
'albumdisambig', 'albumdisambig',
'rg_album_gain', 'rg_album_gain',
'rg_album_peak', 'rg_album_peak',
'r128_album_gain',
'original_year', 'original_year',
'original_month', 'original_month',
'original_day', 'original_day',
@ -962,9 +1016,12 @@ class Album(LibModel):
for item in self.items(): for item in self.items():
item.remove(delete, False) item.remove(delete, False)
def move_art(self, copy=False, link=False): def move_art(self, operation=MoveOperation.MOVE):
"""Move or copy any existing album art so that it remains in the """Move, copy, link or hardlink (depending on `operation`) any
same directory as the items. existing album art so that it remains in the same directory as
the items.
`operation` should be an instance of `util.MoveOperation`.
""" """
old_art = self.artpath old_art = self.artpath
if not old_art: if not old_art:
@ -978,38 +1035,46 @@ class Album(LibModel):
log.debug(u'moving album art {0} to {1}', log.debug(u'moving album art {0} to {1}',
util.displayable_path(old_art), util.displayable_path(old_art),
util.displayable_path(new_art)) util.displayable_path(new_art))
if copy: if operation == MoveOperation.MOVE:
util.copy(old_art, new_art)
elif link:
util.link(old_art, new_art)
else:
util.move(old_art, new_art) util.move(old_art, new_art)
util.prune_dirs(os.path.dirname(old_art), self._db.directory)
elif operation == MoveOperation.COPY:
util.copy(old_art, new_art)
elif operation == MoveOperation.LINK:
util.link(old_art, new_art)
elif operation == MoveOperation.HARDLINK:
util.hardlink(old_art, new_art)
self.artpath = new_art self.artpath = new_art
# Prune old path when moving. def move(self, operation=MoveOperation.MOVE, basedir=None, store=True):
if not copy: """Move, copy, link or hardlink (depending on `operation`)
util.prune_dirs(os.path.dirname(old_art), all items to their destination. Any album art moves along with them.
self._db.directory)
def move(self, copy=False, link=False, basedir=None): `basedir` overrides the library base directory for the destination.
"""Moves (or copies) all items to their destination. Any album
art moves along with them. basedir overrides the library base `operation` should be an instance of `util.MoveOperation`.
directory for the destination. The album is stored to the
database, persisting any modifications to its metadata. By default, the album is stored to the database, persisting any
modifications to its metadata. If `store` is `False` however,
the album is not stored automatically, and you'll have to manually
store it after invoking this method.
""" """
basedir = basedir or self._db.directory basedir = basedir or self._db.directory
# Ensure new metadata is available to items for destination # Ensure new metadata is available to items for destination
# computation. # computation.
if store:
self.store() self.store()
# Move items. # Move items.
items = list(self.items()) items = list(self.items())
for item in items: for item in items:
item.move(copy, link, basedir=basedir, with_album=False) item.move(operation, basedir=basedir, with_album=False,
store=store)
# Move art. # Move art.
self.move_art(copy, link) self.move_art(operation)
if store:
self.store() self.store()
def item_dir(self): def item_dir(self):
@ -1054,10 +1119,14 @@ class Album(LibModel):
image = bytestring_path(image) image = bytestring_path(image)
item_dir = item_dir or self.item_dir() item_dir = item_dir or self.item_dir()
filename_tmpl = Template(beets.config['art_filename'].get(unicode)) filename_tmpl = Template(
beets.config['art_filename'].as_str())
subpath = self.evaluate_template(filename_tmpl, True) subpath = self.evaluate_template(filename_tmpl, True)
if beets.config['asciify_paths']: if beets.config['asciify_paths']:
subpath = unidecode(subpath) subpath = util.asciify_path(
subpath,
beets.config['path_sep_replace'].as_str()
)
subpath = util.sanitize_path(subpath, subpath = util.sanitize_path(subpath,
replacements=self._db.replacements) replacements=self._db.replacements)
subpath = bytestring_path(subpath) subpath = bytestring_path(subpath)
@ -1098,9 +1167,11 @@ class Album(LibModel):
plugins.send('art_set', album=self) plugins.send('art_set', album=self)
def store(self): def store(self, fields=None):
"""Update the database with the album information. The album's """Update the database with the album information. The album's
tracks are also updated. tracks are also updated.
:param fields: The fields to be stored. If not specified, all fields
will be.
""" """
# Get modified track fields. # Get modified track fields.
track_updates = {} track_updates = {}
@ -1109,7 +1180,7 @@ class Album(LibModel):
track_updates[key] = self[key] track_updates[key] = self[key]
with self._db.transaction(): with self._db.transaction():
super(Album, self).store() super(Album, self).store(fields)
if track_updates: if track_updates:
for item in self.items(): for item in self.items():
for key, value in track_updates.items(): for key, value in track_updates.items():
@ -1172,7 +1243,8 @@ def parse_query_string(s, model_cls):
The string is split into components using shell-like syntax. The string is split into components using shell-like syntax.
""" """
assert isinstance(s, unicode), u"Query is not unicode: {0!r}".format(s) message = u"Query is not unicode: {0!r}".format(s)
assert isinstance(s, six.text_type), message
try: try:
parts = util.shlex_split(s) parts = util.shlex_split(s)
except ValueError as exc: except ValueError as exc:
@ -1180,6 +1252,19 @@ def parse_query_string(s, model_cls):
return parse_query_parts(parts, model_cls) return parse_query_parts(parts, model_cls)
def _sqlite_bytelower(bytestring):
""" A custom ``bytelower`` sqlite function so we can compare
bytestrings in a semi case insensitive fashion. This is to work
around sqlite builds are that compiled with
``-DSQLITE_LIKE_DOESNT_MATCH_BLOBS``. See
``https://github.com/beetbox/beets/issues/2172`` for details.
"""
if not six.PY2:
return bytestring.lower()
return buffer(bytes(bytestring).lower()) # noqa: F821
# The Library: interface to the database. # The Library: interface to the database.
class Library(dbcore.Database): class Library(dbcore.Database):
@ -1192,9 +1277,8 @@ class Library(dbcore.Database):
path_formats=((PF_KEY_DEFAULT, path_formats=((PF_KEY_DEFAULT,
'$artist/$album/$track $title'),), '$artist/$album/$track $title'),),
replacements=None): replacements=None):
if path != ':memory:': timeout = beets.config['timeout'].as_number()
self.path = bytestring_path(normpath(path)) super(Library, self).__init__(path, timeout=timeout)
super(Library, self).__init__(path)
self.directory = bytestring_path(normpath(directory)) self.directory = bytestring_path(normpath(directory))
self.path_formats = path_formats self.path_formats = path_formats
@ -1202,6 +1286,11 @@ class Library(dbcore.Database):
self._memotable = {} # Used for template substitution performance. self._memotable = {} # Used for template substitution performance.
def _create_connection(self):
conn = super(Library, self)._create_connection()
conn.create_function('bytelower', 1, _sqlite_bytelower)
return conn
# Adding objects to the database. # Adding objects to the database.
def add(self, obj): def add(self, obj):
@ -1248,11 +1337,11 @@ class Library(dbcore.Database):
# Parse the query, if necessary. # Parse the query, if necessary.
try: try:
parsed_sort = None parsed_sort = None
if isinstance(query, basestring): if isinstance(query, six.string_types):
query, parsed_sort = parse_query_string(query, model_cls) query, parsed_sort = parse_query_string(query, model_cls)
elif isinstance(query, (list, tuple)): elif isinstance(query, (list, tuple)):
query, parsed_sort = parse_query_parts(query, model_cls) query, parsed_sort = parse_query_parts(query, model_cls)
except dbcore.query.InvalidQueryArgumentTypeError as exc: except dbcore.query.InvalidQueryArgumentValueError as exc:
raise dbcore.InvalidQueryError(query, exc) raise dbcore.InvalidQueryError(query, exc)
# Any non-null sort specified by the parsed query overrides the # Any non-null sort specified by the parsed query overrides the
@ -1392,22 +1481,24 @@ class DefaultTemplateFunctions(object):
def tmpl_asciify(s): def tmpl_asciify(s):
"""Translate non-ASCII characters to their ASCII equivalents. """Translate non-ASCII characters to their ASCII equivalents.
""" """
return unidecode(s) return util.asciify_path(s, beets.config['path_sep_replace'].as_str())
@staticmethod @staticmethod
def tmpl_time(s, fmt): def tmpl_time(s, fmt):
"""Format a time value using `strftime`. """Format a time value using `strftime`.
""" """
cur_fmt = beets.config['time_format'].get(unicode) cur_fmt = beets.config['time_format'].as_str()
return time.strftime(fmt, time.strptime(s, cur_fmt)) return time.strftime(fmt, time.strptime(s, cur_fmt))
def tmpl_aunique(self, keys=None, disam=None): def tmpl_aunique(self, keys=None, disam=None, bracket=None):
"""Generate a string that is guaranteed to be unique among all """Generate a string that is guaranteed to be unique among all
albums in the library who share the same set of keys. A fields albums in the library who share the same set of keys. A fields
from "disam" is used in the string if one is sufficient to from "disam" is used in the string if one is sufficient to
disambiguate the albums. Otherwise, a fallback opaque value is disambiguate the albums. Otherwise, a fallback opaque value is
used. Both "keys" and "disam" should be given as used. Both "keys" and "disam" should be given as
whitespace-separated lists of field names. whitespace-separated lists of field names, while "bracket" is a
pair of characters to be used as brackets surrounding the
disambiguator or empty to have no brackets.
""" """
# Fast paths: no album, no item or library, or memoized value. # Fast paths: no album, no item or library, or memoized value.
if not self.item or not self.lib: if not self.item or not self.lib:
@ -1421,9 +1512,19 @@ class DefaultTemplateFunctions(object):
keys = keys or 'albumartist album' keys = keys or 'albumartist album'
disam = disam or 'albumtype year label catalognum albumdisambig' disam = disam or 'albumtype year label catalognum albumdisambig'
if bracket is None:
bracket = '[]'
keys = keys.split() keys = keys.split()
disam = disam.split() disam = disam.split()
# Assign a left and right bracket or leave blank if argument is empty.
if len(bracket) == 2:
bracket_l = bracket[0]
bracket_r = bracket[1]
else:
bracket_l = u''
bracket_r = u''
album = self.lib.get_album(self.item) album = self.lib.get_album(self.item)
if not album: if not album:
# Do nothing for singletons. # Do nothing for singletons.
@ -1456,13 +1557,19 @@ class DefaultTemplateFunctions(object):
else: else:
# No disambiguator distinguished all fields. # No disambiguator distinguished all fields.
res = u' {0}'.format(album.id) res = u' {1}{0}{2}'.format(album.id, bracket_l, bracket_r)
self.lib._memotable[memokey] = res self.lib._memotable[memokey] = res
return res return res
# Flatten disambiguation value into a string. # Flatten disambiguation value into a string.
disam_value = album.formatted(True).get(disambiguator) disam_value = album.formatted(True).get(disambiguator)
res = u' [{0}]'.format(disam_value)
# Return empty string if disambiguator is empty.
if disam_value:
res = u' {1}{0}{2}'.format(disam_value, bracket_l, bracket_r)
else:
res = u''
self.lib._memotable[memokey] = res self.lib._memotable[memokey] = res
return res return res

View file

@ -27,6 +27,7 @@ from copy import copy
from logging import * # noqa from logging import * # noqa
import subprocess import subprocess
import threading import threading
import six
def logsafe(val): def logsafe(val):
@ -42,7 +43,7 @@ def logsafe(val):
example. example.
""" """
# Already Unicode. # Already Unicode.
if isinstance(val, unicode): if isinstance(val, six.text_type):
return val return val
# Bytestring: needs decoding. # Bytestring: needs decoding.
@ -51,16 +52,16 @@ def logsafe(val):
# (a) only do this for paths, if they can be given a distinct # (a) only do this for paths, if they can be given a distinct
# type, and (b) warn the developer if they do this for other # type, and (b) warn the developer if they do this for other
# bytestrings. # bytestrings.
return val.decode('utf8', 'replace') return val.decode('utf-8', 'replace')
# A "problem" object: needs a workaround. # A "problem" object: needs a workaround.
elif isinstance(val, subprocess.CalledProcessError): elif isinstance(val, subprocess.CalledProcessError):
try: try:
return unicode(val) return six.text_type(val)
except UnicodeDecodeError: except UnicodeDecodeError:
# An object with a broken __unicode__ formatter. Use __str__ # An object with a broken __unicode__ formatter. Use __str__
# instead. # instead.
return str(val).decode('utf8', 'replace') return str(val).decode('utf-8', 'replace')
# Other objects are used as-is so field access, etc., still works in # Other objects are used as-is so field access, etc., still works in
# the format string. # the format string.

File diff suppressed because it is too large Load diff

View file

@ -27,6 +27,7 @@ from functools import wraps
import beets import beets
from beets import logging from beets import logging
from beets import mediafile from beets import mediafile
import six
PLUGIN_NAMESPACE = 'beetsplug' PLUGIN_NAMESPACE = 'beetsplug'
@ -54,10 +55,10 @@ class PluginLogFilter(logging.Filter):
def filter(self, record): def filter(self, record):
if hasattr(record.msg, 'msg') and isinstance(record.msg.msg, if hasattr(record.msg, 'msg') and isinstance(record.msg.msg,
basestring): six.string_types):
# A _LogMessage from our hacked-up Logging replacement. # A _LogMessage from our hacked-up Logging replacement.
record.msg.msg = self.prefix + record.msg.msg record.msg.msg = self.prefix + record.msg.msg
elif isinstance(record.msg, basestring): elif isinstance(record.msg, six.string_types):
record.msg = self.prefix + record.msg record.msg = self.prefix + record.msg
return True return True
@ -80,6 +81,7 @@ class BeetsPlugin(object):
self.template_fields = {} self.template_fields = {}
if not self.album_template_fields: if not self.album_template_fields:
self.album_template_fields = {} self.album_template_fields = {}
self.early_import_stages = []
self.import_stages = [] self.import_stages = []
self._log = log.getChild(self.name) self._log = log.getChild(self.name)
@ -93,6 +95,22 @@ class BeetsPlugin(object):
""" """
return () return ()
def _set_stage_log_level(self, stages):
"""Adjust all the stages in `stages` to WARNING logging level.
"""
return [self._set_log_level_and_params(logging.WARNING, stage)
for stage in stages]
def get_early_import_stages(self):
"""Return a list of functions that should be called as importer
pipelines stages early in the pipeline.
The callables are wrapped versions of the functions in
`self.early_import_stages`. Wrapping provides some bookkeeping for the
plugin: specifically, the logging level is adjusted to WARNING.
"""
return self._set_stage_log_level(self.early_import_stages)
def get_import_stages(self): def get_import_stages(self):
"""Return a list of functions that should be called as importer """Return a list of functions that should be called as importer
pipelines stages. pipelines stages.
@ -101,8 +119,7 @@ class BeetsPlugin(object):
`self.import_stages`. Wrapping provides some bookkeeping for the `self.import_stages`. Wrapping provides some bookkeeping for the
plugin: specifically, the logging level is adjusted to WARNING. plugin: specifically, the logging level is adjusted to WARNING.
""" """
return [self._set_log_level_and_params(logging.WARNING, import_stage) return self._set_stage_log_level(self.import_stages)
for import_stage in self.import_stages]
def _set_log_level_and_params(self, base_log_level, func): def _set_log_level_and_params(self, base_log_level, func):
"""Wrap `func` to temporarily set this plugin's logger level to """Wrap `func` to temporarily set this plugin's logger level to
@ -254,7 +271,7 @@ def load_plugins(names=()):
except ImportError as exc: except ImportError as exc:
# Again, this is hacky: # Again, this is hacky:
if exc.args[0].endswith(' ' + name): if exc.args[0].endswith(' ' + name):
log.warn(u'** plugin {0} not found', name) log.warning(u'** plugin {0} not found', name)
else: else:
raise raise
else: else:
@ -263,8 +280,8 @@ def load_plugins(names=()):
and obj != BeetsPlugin and obj not in _classes: and obj != BeetsPlugin and obj not in _classes:
_classes.add(obj) _classes.add(obj)
except: except Exception:
log.warn( log.warning(
u'** error loading plugin {}:\n{}', u'** error loading plugin {}:\n{}',
name, name,
traceback.format_exc(), traceback.format_exc(),
@ -350,41 +367,35 @@ def album_distance(items, album_info, mapping):
def candidates(items, artist, album, va_likely): def candidates(items, artist, album, va_likely):
"""Gets MusicBrainz candidates for an album from each plugin. """Gets MusicBrainz candidates for an album from each plugin.
""" """
out = []
for plugin in find_plugins(): for plugin in find_plugins():
out.extend(plugin.candidates(items, artist, album, va_likely)) for candidate in plugin.candidates(items, artist, album, va_likely):
return out yield candidate
def item_candidates(item, artist, title): def item_candidates(item, artist, title):
"""Gets MusicBrainz candidates for an item from the plugins. """Gets MusicBrainz candidates for an item from the plugins.
""" """
out = []
for plugin in find_plugins(): for plugin in find_plugins():
out.extend(plugin.item_candidates(item, artist, title)) for item_candidate in plugin.item_candidates(item, artist, title):
return out yield item_candidate
def album_for_id(album_id): def album_for_id(album_id):
"""Get AlbumInfo objects for a given ID string. """Get AlbumInfo objects for a given ID string.
""" """
out = []
for plugin in find_plugins(): for plugin in find_plugins():
res = plugin.album_for_id(album_id) album = plugin.album_for_id(album_id)
if res: if album:
out.append(res) yield album
return out
def track_for_id(track_id): def track_for_id(track_id):
"""Get TrackInfo objects for a given ID string. """Get TrackInfo objects for a given ID string.
""" """
out = []
for plugin in find_plugins(): for plugin in find_plugins():
res = plugin.track_for_id(track_id) track = plugin.track_for_id(track_id)
if res: if track:
out.append(res) yield track
return out
def template_funcs(): def template_funcs():
@ -398,6 +409,14 @@ def template_funcs():
return funcs return funcs
def early_import_stages():
"""Get a list of early import stage functions defined by plugins."""
stages = []
for plugin in find_plugins():
stages += plugin.get_early_import_stages()
return stages
def import_stages(): def import_stages():
"""Get a list of import stage functions defined by plugins.""" """Get a list of import stage functions defined by plugins."""
stages = [] stages = []
@ -483,7 +502,64 @@ def sanitize_choices(choices, choices_all):
others = [x for x in choices_all if x not in choices] others = [x for x in choices_all if x not in choices]
res = [] res = []
for s in choices: for s in choices:
if s in list(choices_all) + ['*']: if s not in seen:
if not (s in seen or seen.add(s)): if s in list(choices_all):
res.extend(list(others) if s == '*' else [s]) res.append(s)
elif s == '*':
res.extend(others)
seen.add(s)
return res return res
def sanitize_pairs(pairs, pairs_all):
"""Clean up a single-element mapping configuration attribute as returned
by `confit`'s `Pairs` template: keep only two-element tuples present in
pairs_all, remove duplicate elements, expand ('str', '*') and ('*', '*')
wildcards while keeping the original order. Note that ('*', '*') and
('*', 'whatever') have the same effect.
For example,
>>> sanitize_pairs(
... [('foo', 'baz bar'), ('key', '*'), ('*', '*')],
... [('foo', 'bar'), ('foo', 'baz'), ('foo', 'foobar'),
... ('key', 'value')]
... )
[('foo', 'baz'), ('foo', 'bar'), ('key', 'value'), ('foo', 'foobar')]
"""
pairs_all = list(pairs_all)
seen = set()
others = [x for x in pairs_all if x not in pairs]
res = []
for k, values in pairs:
for v in values.split():
x = (k, v)
if x in pairs_all:
if x not in seen:
seen.add(x)
res.append(x)
elif k == '*':
new = [o for o in others if o not in seen]
seen.update(new)
res.extend(new)
elif v == '*':
new = [o for o in others if o not in seen and o[0] == k]
seen.update(new)
res.extend(new)
return res
def notify_info_yielded(event):
"""Makes a generator send the event 'event' every time it yields.
This decorator is supposed to decorate a generator, but any function
returning an iterable should work.
Each yielded value is passed to plugins using the 'info' parameter of
'send'.
"""
def decorator(generator):
def decorated(*args, **kwargs):
for v in generator(*args, **kwargs):
send(event, info=v)
yield v
return decorated
return decorator

View file

@ -20,7 +20,6 @@ CLI commands are implemented in the ui.commands module.
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
import locale
import optparse import optparse
import textwrap import textwrap
import sys import sys
@ -31,6 +30,7 @@ import re
import struct import struct
import traceback import traceback
import os.path import os.path
from six.moves import input
from beets import logging from beets import logging
from beets import library from beets import library
@ -38,9 +38,11 @@ from beets import plugins
from beets import util from beets import util
from beets.util.functemplate import Template from beets.util.functemplate import Template
from beets import config from beets import config
from beets.util import confit from beets.util import confit, as_string
from beets.autotag import mb from beets.autotag import mb
from beets.dbcore import query as db_query from beets.dbcore import query as db_query
from beets.dbcore import db
import six
# On Windows platforms, use colorama to support "ANSI" terminal colors. # On Windows platforms, use colorama to support "ANSI" terminal colors.
if sys.platform == 'win32': if sys.platform == 'win32':
@ -73,51 +75,47 @@ class UserError(Exception):
# Encoding utilities. # Encoding utilities.
def _in_encoding(default=u'utf-8'): def _in_encoding():
"""Get the encoding to use for *inputting* strings from the console. """Get the encoding to use for *inputting* strings from the console.
:param default: the fallback sys.stdin encoding
""" """
return _stream_encoding(sys.stdin)
return config['terminal_encoding'].get() or getattr(sys.stdin, 'encoding',
default)
def _out_encoding(): def _out_encoding():
"""Get the encoding to use for *outputting* strings to the console. """Get the encoding to use for *outputting* strings to the console.
""" """
return _stream_encoding(sys.stdout)
def _stream_encoding(stream, default='utf-8'):
"""A helper for `_in_encoding` and `_out_encoding`: get the stream's
preferred encoding, using a configured override or a default
fallback if neither is not specified.
"""
# Configured override? # Configured override?
encoding = config['terminal_encoding'].get() encoding = config['terminal_encoding'].get()
if encoding: if encoding:
return encoding return encoding
# For testing: When sys.stdout is a StringIO under the test harness, # For testing: When sys.stdout or sys.stdin is a StringIO under the
# it doesn't have an `encoding` attribute. Just use UTF-8. # test harness, it doesn't have an `encoding` attribute. Just use
if not hasattr(sys.stdout, 'encoding'): # UTF-8.
return 'utf8' if not hasattr(stream, 'encoding'):
return default
# Python's guessed output stream encoding, or UTF-8 as a fallback # Python's guessed output stream encoding, or UTF-8 as a fallback
# (e.g., when piped to a file). # (e.g., when piped to a file).
return sys.stdout.encoding or 'utf8' return stream.encoding or default
def _arg_encoding():
"""Get the encoding for command-line arguments (and other OS
locale-sensitive strings).
"""
try:
return locale.getdefaultlocale()[1] or 'utf8'
except ValueError:
# Invalid locale environment variable setting. To avoid
# failing entirely for no good reason, assume UTF-8.
return 'utf8'
def decargs(arglist): def decargs(arglist):
"""Given a list of command-line argument bytestrings, attempts to """Given a list of command-line argument bytestrings, attempts to
decode them to Unicode strings. decode them to Unicode strings when running under Python 2.
""" """
return [s.decode(_arg_encoding()) for s in arglist] if six.PY2:
return [s.decode(util.arg_encoding()) for s in arglist]
else:
return arglist
def print_(*strings, **kwargs): def print_(*strings, **kwargs):
@ -125,26 +123,36 @@ def print_(*strings, **kwargs):
is not in the terminal's encoding's character set, just silently is not in the terminal's encoding's character set, just silently
replaces it. replaces it.
If the arguments are strings then they're expected to share the same The arguments must be Unicode strings: `unicode` on Python 2; `str` on
type: either bytes or unicode. Python 3.
The `end` keyword argument behaves similarly to the built-in `print` The `end` keyword argument behaves similarly to the built-in `print`
(it defaults to a newline). The value should have the same string (it defaults to a newline).
type as the arguments.
""" """
end = kwargs.get('end') if not strings:
strings = [u'']
assert isinstance(strings[0], six.text_type)
if not strings or isinstance(strings[0], unicode):
txt = u' '.join(strings) txt = u' '.join(strings)
txt += u'\n' if end is None else end txt += kwargs.get('end', u'\n')
# Encode the string and write it to stdout.
if six.PY2:
# On Python 2, sys.stdout expects bytes.
out = txt.encode(_out_encoding(), 'replace')
sys.stdout.write(out)
else: else:
txt = b' '.join(strings) # On Python 3, sys.stdout expects text strings and uses the
txt += b'\n' if end is None else end # exception-throwing encoding error policy. To avoid throwing
# errors and use our configurable encoding override, we use the
# Always send bytes to the stdout stream. # underlying bytes buffer instead.
if isinstance(txt, unicode): if hasattr(sys.stdout, 'buffer'):
txt = txt.encode(_out_encoding(), 'replace') out = txt.encode(_out_encoding(), 'replace')
sys.stdout.buffer.write(out)
sys.stdout.buffer.flush()
else:
# In our test harnesses (e.g., DummyOut), sys.stdout.buffer
# does not exist. We instead just record the text string.
sys.stdout.write(txt) sys.stdout.write(txt)
@ -188,23 +196,26 @@ def should_move(move_opt=None):
# Input prompts. # Input prompts.
def input_(prompt=None): def input_(prompt=None):
"""Like `raw_input`, but decodes the result to a Unicode string. """Like `input`, but decodes the result to a Unicode string.
Raises a UserError if stdin is not available. The prompt is sent to Raises a UserError if stdin is not available. The prompt is sent to
stdout rather than stderr. A printed between the prompt and the stdout rather than stderr. A printed between the prompt and the
input cursor. input cursor.
""" """
# raw_input incorrectly sends prompts to stderr, not stdout, so we # raw_input incorrectly sends prompts to stderr, not stdout, so we
# use print() explicitly to display prompts. # use print_() explicitly to display prompts.
# http://bugs.python.org/issue1927 # http://bugs.python.org/issue1927
if prompt: if prompt:
print_(prompt, end=' ') print_(prompt, end=u' ')
try: try:
resp = raw_input() resp = input()
except EOFError: except EOFError:
raise UserError(u'stdin stream ended while input required') raise UserError(u'stdin stream ended while input required')
if six.PY2:
return resp.decode(_in_encoding(), 'ignore') return resp.decode(_in_encoding(), 'ignore')
else:
return resp
def input_options(options, require=False, prompt=None, fallback_prompt=None, def input_options(options, require=False, prompt=None, fallback_prompt=None,
@ -256,7 +267,7 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None,
# Mark the option's shortcut letter for display. # Mark the option's shortcut letter for display.
if not require and ( if not require and (
(default is None and not numrange and first) or (default is None and not numrange and first) or
(isinstance(default, basestring) and (isinstance(default, six.string_types) and
found_letter.lower() == default.lower())): found_letter.lower() == default.lower())):
# The first option is the default; mark it. # The first option is the default; mark it.
show_letter = '[%s]' % found_letter.upper() show_letter = '[%s]' % found_letter.upper()
@ -292,11 +303,11 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None,
prompt_part_lengths = [] prompt_part_lengths = []
if numrange: if numrange:
if isinstance(default, int): if isinstance(default, int):
default_name = unicode(default) default_name = six.text_type(default)
default_name = colorize('action_default', default_name) default_name = colorize('action_default', default_name)
tmpl = '# selection (default %s)' tmpl = '# selection (default %s)'
prompt_parts.append(tmpl % default_name) prompt_parts.append(tmpl % default_name)
prompt_part_lengths.append(len(tmpl % unicode(default))) prompt_part_lengths.append(len(tmpl % six.text_type(default)))
else: else:
prompt_parts.append('# selection') prompt_parts.append('# selection')
prompt_part_lengths.append(len(prompt_parts[-1])) prompt_part_lengths.append(len(prompt_parts[-1]))
@ -516,7 +527,8 @@ def colorize(color_name, text):
if config['ui']['color']: if config['ui']['color']:
global COLORS global COLORS
if not COLORS: if not COLORS:
COLORS = dict((name, config['ui']['colors'][name].get(unicode)) COLORS = dict((name,
config['ui']['colors'][name].as_str())
for name in COLOR_NAMES) for name in COLOR_NAMES)
# In case a 3rd party plugin is still passing the actual color ('red') # In case a 3rd party plugin is still passing the actual color ('red')
# instead of the abstract color name ('text_error') # instead of the abstract color name ('text_error')
@ -536,10 +548,11 @@ def _colordiff(a, b, highlight='text_highlight',
highlighted intelligently to show differences; other values are highlighted intelligently to show differences; other values are
stringified and highlighted in their entirety. stringified and highlighted in their entirety.
""" """
if not isinstance(a, basestring) or not isinstance(b, basestring): if not isinstance(a, six.string_types) \
or not isinstance(b, six.string_types):
# Non-strings: use ordinary equality. # Non-strings: use ordinary equality.
a = unicode(a) a = six.text_type(a)
b = unicode(b) b = six.text_type(b)
if a == b: if a == b:
return a, b return a, b
else: else:
@ -587,7 +600,7 @@ def colordiff(a, b, highlight='text_highlight'):
if config['ui']['color']: if config['ui']['color']:
return _colordiff(a, b, highlight) return _colordiff(a, b, highlight)
else: else:
return unicode(a), unicode(b) return six.text_type(a), six.text_type(b)
def get_path_formats(subview=None): def get_path_formats(subview=None):
@ -598,7 +611,7 @@ def get_path_formats(subview=None):
subview = subview or config['paths'] subview = subview or config['paths']
for query, view in subview.items(): for query, view in subview.items():
query = PF_KEY_QUERIES.get(query, query) # Expand common queries. query = PF_KEY_QUERIES.get(query, query) # Expand common queries.
path_formats.append((query, Template(view.get(unicode)))) path_formats.append((query, Template(view.as_str())))
return path_formats return path_formats
@ -666,7 +679,7 @@ def _field_diff(field, old, new):
# For strings, highlight changes. For others, colorize the whole # For strings, highlight changes. For others, colorize the whole
# thing. # thing.
if isinstance(oldval, basestring): if isinstance(oldval, six.string_types):
oldstr, newstr = colordiff(oldval, newstr) oldstr, newstr = colordiff(oldval, newstr)
else: else:
oldstr = colorize('text_error', oldstr) oldstr = colorize('text_error', oldstr)
@ -757,6 +770,34 @@ def show_path_changes(path_changes):
log.info(u'{0} {1} -> {2}', source, ' ' * pad, dest) log.info(u'{0} {1} -> {2}', source, ' ' * pad, dest)
# Helper functions for option parsing.
def _store_dict(option, opt_str, value, parser):
"""Custom action callback to parse options which have ``key=value``
pairs as values. All such pairs passed for this option are
aggregated into a dictionary.
"""
dest = option.dest
option_values = getattr(parser.values, dest, None)
if option_values is None:
# This is the first supplied ``key=value`` pair of option.
# Initialize empty dictionary and get a reference to it.
setattr(parser.values, dest, dict())
option_values = getattr(parser.values, dest)
try:
key, value = map(lambda s: util.text_string(s), value.split('='))
if not (key and value):
raise ValueError
except ValueError:
raise UserError(
"supplied argument `{0}' is not of the form `key=value'"
.format(value))
option_values[key] = value
class CommonOptionsParser(optparse.OptionParser, object): class CommonOptionsParser(optparse.OptionParser, object):
"""Offers a simple way to add common formatting options. """Offers a simple way to add common formatting options.
@ -799,7 +840,14 @@ class CommonOptionsParser(optparse.OptionParser, object):
if store_true: if store_true:
setattr(parser.values, option.dest, True) setattr(parser.values, option.dest, True)
value = fmt or value and unicode(value) or '' # Use the explicitly specified format, or the string from the option.
if fmt:
value = fmt
elif value:
value, = decargs([value])
else:
value = u''
parser.values.format = value parser.values.format = value
if target: if target:
config[target._format_config_key].set(value) config[target._format_config_key].set(value)
@ -830,7 +878,7 @@ class CommonOptionsParser(optparse.OptionParser, object):
""" """
path = optparse.Option(*flags, nargs=0, action='callback', path = optparse.Option(*flags, nargs=0, action='callback',
callback=self._set_format, callback=self._set_format,
callback_kwargs={'fmt': '$path', callback_kwargs={'fmt': u'$path',
'store_true': True}, 'store_true': True},
help=u'print paths for matched items or albums') help=u'print paths for matched items or albums')
self.add_option(path) self.add_option(path)
@ -852,7 +900,7 @@ class CommonOptionsParser(optparse.OptionParser, object):
""" """
kwargs = {} kwargs = {}
if target: if target:
if isinstance(target, basestring): if isinstance(target, six.string_types):
target = {'item': library.Item, target = {'item': library.Item,
'album': library.Album}[target] 'album': library.Album}[target]
kwargs['target'] = target kwargs['target'] = target
@ -911,7 +959,7 @@ class Subcommand(object):
def root_parser(self, root_parser): def root_parser(self, root_parser):
self._root_parser = root_parser self._root_parser = root_parser
self.parser.prog = '{0} {1}'.format( self.parser.prog = '{0} {1}'.format(
root_parser.get_prog_name().decode('utf8'), self.name) as_string(root_parser.get_prog_name()), self.name)
class SubcommandsOptionParser(CommonOptionsParser): class SubcommandsOptionParser(CommonOptionsParser):
@ -1044,54 +1092,24 @@ class SubcommandsOptionParser(CommonOptionsParser):
optparse.Option.ALWAYS_TYPED_ACTIONS += ('callback',) optparse.Option.ALWAYS_TYPED_ACTIONS += ('callback',)
def vararg_callback(option, opt_str, value, parser):
"""Callback for an option with variable arguments.
Manually collect arguments right of a callback-action
option (ie. with action="callback"), and add the resulting
list to the destination var.
Usage:
parser.add_option("-c", "--callback", dest="vararg_attr",
action="callback", callback=vararg_callback)
Details:
http://docs.python.org/2/library/optparse.html#callback-example-6-variable
-arguments
"""
value = [value]
def floatable(str):
try:
float(str)
return True
except ValueError:
return False
for arg in parser.rargs:
# stop on --foo like options
if arg[:2] == "--" and len(arg) > 2:
break
# stop on -a, but not on -3 or -3.0
if arg[:1] == "-" and len(arg) > 1 and not floatable(arg):
break
value.append(arg)
del parser.rargs[:len(value) - 1]
setattr(parser.values, option.dest, value)
# The main entry point and bootstrapping. # The main entry point and bootstrapping.
def _load_plugins(config): def _load_plugins(config):
"""Load the plugins specified in the configuration. """Load the plugins specified in the configuration.
""" """
paths = config['pluginpath'].get(confit.StrSeq(split=False)) paths = config['pluginpath'].as_str_seq(split=False)
paths = map(util.normpath, paths) paths = [util.normpath(p) for p in paths]
log.debug(u'plugin paths: {0}', util.displayable_path(paths)) log.debug(u'plugin paths: {0}', util.displayable_path(paths))
# On Python 3, the search paths need to be unicode.
paths = [util.py3_path(p) for p in paths]
# Extend the `beetsplug` package to include the plugin paths.
import beetsplug import beetsplug
beetsplug.__path__ = paths + beetsplug.__path__ beetsplug.__path__ = paths + beetsplug.__path__
# For backwards compatibility.
# For backwards compatibility, also support plugin paths that
# *contain* a `beetsplug` package.
sys.path += paths sys.path += paths
plugins.load_plugins(config['plugins'].as_str_seq()) plugins.load_plugins(config['plugins'].as_str_seq())
@ -1133,9 +1151,11 @@ def _configure(options):
# special handling lets specified plugins get loaded before we # special handling lets specified plugins get loaded before we
# finish parsing the command line. # finish parsing the command line.
if getattr(options, 'config', None) is not None: if getattr(options, 'config', None) is not None:
config_path = options.config overlay_path = options.config
del options.config del options.config
config.set_file(config_path) config.set_file(overlay_path)
else:
overlay_path = None
config.set_args(options) config.set_args(options)
# Configure the logger. # Configure the logger.
@ -1144,27 +1164,9 @@ def _configure(options):
else: else:
log.set_global_level(logging.INFO) log.set_global_level(logging.INFO)
# Ensure compatibility with old (top-level) color configuration. if overlay_path:
# Deprecation msg to motivate user to switch to config['ui']['color]. log.debug(u'overlaying configuration: {0}',
if config['color'].exists(): util.displayable_path(overlay_path))
log.warning(u'Warning: top-level configuration of `color` '
u'is deprecated. Configure color use under `ui`. '
u'See documentation for more info.')
config['ui']['color'].set(config['color'].get(bool))
# Compatibility from list_format_{item,album} to format_{item,album}
for elem in ('item', 'album'):
old_key = 'list_format_{0}'.format(elem)
if config[old_key].exists():
new_key = 'format_{0}'.format(elem)
log.warning(
u'Warning: configuration uses "{0}" which is deprecated'
u' in favor of "{1}" now that it affects all commands. '
u'See changelog & documentation.',
old_key,
new_key,
)
config[new_key].set(config[old_key])
config_path = config.user_config_path() config_path = config.user_config_path()
if os.path.isfile(config_path): if os.path.isfile(config_path):
@ -1182,7 +1184,7 @@ def _configure(options):
def _open_library(config): def _open_library(config):
"""Create a new library instance from the configuration. """Create a new library instance from the configuration.
""" """
dbpath = config['library'].as_filename() dbpath = util.bytestring_path(config['library'].as_filename())
try: try:
lib = library.Library( lib = library.Library(
dbpath, dbpath,
@ -1233,6 +1235,7 @@ def _raw_main(args, lib=None):
from beets.ui.commands import config_edit from beets.ui.commands import config_edit
return config_edit() return config_edit()
test_lib = bool(lib)
subcommands, plugins, lib = _setup(options, lib) subcommands, plugins, lib = _setup(options, lib)
parser.add_subcommand(*subcommands) parser.add_subcommand(*subcommands)
@ -1240,6 +1243,9 @@ def _raw_main(args, lib=None):
subcommand.func(lib, suboptions, subargs) subcommand.func(lib, suboptions, subargs)
plugins.send('cli_exit', lib=lib) plugins.send('cli_exit', lib=lib)
if not test_lib:
# Clean up the library unless it came from the test harness.
lib._close()
def main(args=None): def main(args=None):
@ -1270,9 +1276,16 @@ def main(args=None):
except IOError as exc: except IOError as exc:
if exc.errno == errno.EPIPE: if exc.errno == errno.EPIPE:
# "Broken pipe". End silently. # "Broken pipe". End silently.
pass sys.stderr.close()
else: else:
raise raise
except KeyboardInterrupt: except KeyboardInterrupt:
# Silently ignore ^C except in verbose mode. # Silently ignore ^C except in verbose mode.
log.debug(u'{}', traceback.format_exc()) log.debug(u'{}', traceback.format_exc())
except db.DBAccessError as exc:
log.error(
u'database access error: {0}\n'
u'the library file might have a permissions problem',
exc
)
sys.exit(1)

View file

@ -21,6 +21,7 @@ from __future__ import division, absolute_import, print_function
import os import os
import re import re
from platform import python_version
from collections import namedtuple, Counter from collections import namedtuple, Counter
from itertools import chain from itertools import chain
@ -33,14 +34,17 @@ from beets.autotag import hooks
from beets import plugins from beets import plugins
from beets import importer from beets import importer
from beets import util from beets import util
from beets.util import syspath, normpath, ancestry, displayable_path from beets.util import syspath, normpath, ancestry, displayable_path, \
MoveOperation
from beets import library from beets import library
from beets import config from beets import config
from beets import logging from beets import logging
from beets.util.confit import _package_path from beets.util.confit import _package_path
import six
from . import _store_dict
VARIOUS_ARTISTS = u'Various Artists' VARIOUS_ARTISTS = u'Various Artists'
PromptChoice = namedtuple('ExtraChoice', ['short', 'long', 'callback']) PromptChoice = namedtuple('PromptChoice', ['short', 'long', 'callback'])
# Global logger. # Global logger.
log = logging.getLogger('beets') log = logging.getLogger('beets')
@ -82,16 +86,16 @@ def _do_query(lib, query, album, also_items=True):
def _print_keys(query): def _print_keys(query):
"""Given a SQLite query result, print the `key` field of each """Given a SQLite query result, print the `key` field of each
returned row, with identation of 2 spaces. returned row, with indentation of 2 spaces.
""" """
for row in query: for row in query:
print_(' ' * 2 + row['key']) print_(u' ' * 2 + row['key'])
def fields_func(lib, opts, args): def fields_func(lib, opts, args):
def _print_rows(names): def _print_rows(names):
names.sort() names.sort()
print_(" " + "\n ".join(names)) print_(u' ' + u'\n '.join(names))
print_(u"Item fields:") print_(u"Item fields:")
_print_rows(library.Item.all_keys()) _print_rows(library.Item.all_keys())
@ -156,14 +160,14 @@ def disambig_string(info):
if isinstance(info, hooks.AlbumInfo): if isinstance(info, hooks.AlbumInfo):
if info.media: if info.media:
if info.mediums > 1: if info.mediums and info.mediums > 1:
disambig.append(u'{0}x{1}'.format( disambig.append(u'{0}x{1}'.format(
info.mediums, info.media info.mediums, info.media
)) ))
else: else:
disambig.append(info.media) disambig.append(info.media)
if info.year: if info.year:
disambig.append(unicode(info.year)) disambig.append(six.text_type(info.year))
if info.country: if info.country:
disambig.append(info.country) disambig.append(info.country)
if info.label: if info.label:
@ -233,12 +237,12 @@ def show_change(cur_artist, cur_album, match):
medium = track_info.disc medium = track_info.disc
mediums = track_info.disctotal mediums = track_info.disctotal
if config['per_disc_numbering']: if config['per_disc_numbering']:
if mediums > 1: if mediums and mediums > 1:
return u'{0}-{1}'.format(medium, medium_index) return u'{0}-{1}'.format(medium, medium_index)
else: else:
return unicode(medium_index) return six.text_type(medium_index or index)
else: else:
return unicode(index) return six.text_type(index)
# Identify the album in question. # Identify the album in question.
if cur_artist != match.info.artist or \ if cur_artist != match.info.artist or \
@ -279,7 +283,7 @@ def show_change(cur_artist, cur_album, match):
print_(' '.join(info)) print_(' '.join(info))
# Tracks. # Tracks.
pairs = match.mapping.items() pairs = list(match.mapping.items())
pairs.sort(key=lambda item_and_track_info: item_and_track_info[1].index) pairs.sort(key=lambda item_and_track_info: item_and_track_info[1].index)
# Build up LHS and RHS for track difference display. The `lines` list # Build up LHS and RHS for track difference display. The `lines` list
@ -493,7 +497,7 @@ def _summary_judgment(rec):
def choose_candidate(candidates, singleton, rec, cur_artist=None, def choose_candidate(candidates, singleton, rec, cur_artist=None,
cur_album=None, item=None, itemcount=None, cur_album=None, item=None, itemcount=None,
extra_choices=[]): choices=[]):
"""Given a sorted list of candidates, ask the user for a selection """Given a sorted list of candidates, ask the user for a selection
of which candidate to use. Applies to both full albums and of which candidate to use. Applies to both full albums and
singletons (tracks). Candidates are either AlbumMatch or TrackMatch singletons (tracks). Candidates are either AlbumMatch or TrackMatch
@ -501,16 +505,12 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None,
`cur_album`, and `itemcount` must be provided. For singletons, `cur_album`, and `itemcount` must be provided. For singletons,
`item` must be provided. `item` must be provided.
`extra_choices` is a list of `PromptChoice`s, containg the choices `choices` is a list of `PromptChoice`s to be used in each prompt.
appended by the plugins after receiving the `before_choose_candidate`
event. If not empty, the choices are appended to the prompt presented
to the user.
Returns one of the following: Returns one of the following:
* the result of the choice, which may be SKIP, ASIS, TRACKS, or MANUAL * the result of the choice, which may be SKIP or ASIS
* a candidate (an AlbumMatch/TrackMatch object) * a candidate (an AlbumMatch/TrackMatch object)
* the short letter of a `PromptChoice` (if the user selected one of * a chosen `PromptChoice` from `choices`
the `extra_choices`).
""" """
# Sanity check. # Sanity check.
if singleton: if singleton:
@ -519,41 +519,22 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None,
assert cur_artist is not None assert cur_artist is not None
assert cur_album is not None assert cur_album is not None
# Build helper variables for extra choices. # Build helper variables for the prompt choices.
extra_opts = tuple(c.long for c in extra_choices) choice_opts = tuple(c.long for c in choices)
extra_actions = tuple(c.short for c in extra_choices) choice_actions = {c.short: c for c in choices}
# Zero candidates. # Zero candidates.
if not candidates: if not candidates:
if singleton: if singleton:
print_(u"No matching recordings found.") print_(u"No matching recordings found.")
opts = (u'Use as-is', u'Skip', u'Enter search', u'enter Id',
u'aBort')
else: else:
print_(u"No matching release found for {0} tracks." print_(u"No matching release found for {0} tracks."
.format(itemcount)) .format(itemcount))
print_(u'For help, see: ' print_(u'For help, see: '
u'http://beets.readthedocs.org/en/latest/faq.html#nomatch') u'http://beets.readthedocs.org/en/latest/faq.html#nomatch')
opts = (u'Use as-is', u'as Tracks', u'Group albums', u'Skip', sel = ui.input_options(choice_opts)
u'Enter search', u'enter Id', u'aBort') if sel in choice_actions:
sel = ui.input_options(opts + extra_opts) return choice_actions[sel]
if sel == u'u':
return importer.action.ASIS
elif sel == u't':
assert not singleton
return importer.action.TRACKS
elif sel == u'e':
return importer.action.MANUAL
elif sel == u's':
return importer.action.SKIP
elif sel == u'b':
raise importer.ImportAbort()
elif sel == u'i':
return importer.action.MANUAL_ID
elif sel == u'g':
return importer.action.ALBUMS
elif sel in extra_actions:
return sel
else: else:
assert False assert False
@ -601,33 +582,12 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None,
print_(u' '.join(line)) print_(u' '.join(line))
# Ask the user for a choice. # Ask the user for a choice.
if singleton: sel = ui.input_options(choice_opts,
opts = (u'Skip', u'Use as-is', u'Enter search', u'enter Id',
u'aBort')
else:
opts = (u'Skip', u'Use as-is', u'as Tracks', u'Group albums',
u'Enter search', u'enter Id', u'aBort')
sel = ui.input_options(opts + extra_opts,
numrange=(1, len(candidates))) numrange=(1, len(candidates)))
if sel == u's': if sel == u'm':
return importer.action.SKIP
elif sel == u'u':
return importer.action.ASIS
elif sel == u'm':
pass pass
elif sel == u'e': elif sel in choice_actions:
return importer.action.MANUAL return choice_actions[sel]
elif sel == u't':
assert not singleton
return importer.action.TRACKS
elif sel == u'b':
raise importer.ImportAbort()
elif sel == u'i':
return importer.action.MANUAL_ID
elif sel == u'g':
return importer.action.ALBUMS
elif sel in extra_actions:
return sel
else: # Numerical selection. else: # Numerical selection.
match = candidates[sel - 1] match = candidates[sel - 1]
if sel != 1: if sel != 1:
@ -647,13 +607,6 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None,
return match return match
# Ask for confirmation. # Ask for confirmation.
if singleton:
opts = (u'Apply', u'More candidates', u'Skip', u'Use as-is',
u'Enter search', u'enter Id', u'aBort')
else:
opts = (u'Apply', u'More candidates', u'Skip', u'Use as-is',
u'as Tracks', u'Group albums', u'Enter search',
u'enter Id', u'aBort')
default = config['import']['default_action'].as_choice({ default = config['import']['default_action'].as_choice({
u'apply': u'a', u'apply': u'a',
u'skip': u's', u'skip': u's',
@ -662,43 +615,57 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None,
}) })
if default is None: if default is None:
require = True require = True
sel = ui.input_options(opts + extra_opts, require=require, # Bell ring when user interaction is needed.
default=default) if config['import']['bell']:
ui.print_(u'\a', end=u'')
sel = ui.input_options((u'Apply', u'More candidates') + choice_opts,
require=require, default=default)
if sel == u'a': if sel == u'a':
return match return match
elif sel == u'g': elif sel in choice_actions:
return importer.action.ALBUMS return choice_actions[sel]
elif sel == u's':
return importer.action.SKIP
elif sel == u'u':
return importer.action.ASIS
elif sel == u't':
assert not singleton
return importer.action.TRACKS
elif sel == u'e':
return importer.action.MANUAL
elif sel == u'b':
raise importer.ImportAbort()
elif sel == u'i':
return importer.action.MANUAL_ID
elif sel in extra_actions:
return sel
def manual_search(singleton): def manual_search(session, task):
"""Input either an artist and album (for full albums) or artist and """Get a new `Proposal` using manual search criteria.
Input either an artist and album (for full albums) or artist and
track name (for singletons) for manual search. track name (for singletons) for manual search.
""" """
artist = input_(u'Artist:') artist = input_(u'Artist:').strip()
name = input_(u'Track:' if singleton else u'Album:') name = input_(u'Album:' if task.is_album else u'Track:').strip()
return artist.strip(), name.strip()
if task.is_album:
_, _, prop = autotag.tag_album(
task.items, artist, name
)
return prop
else:
return autotag.tag_item(task.item, artist, name)
def manual_id(singleton): def manual_id(session, task):
"""Input an ID, either for an album ("release") or a track ("recording"). """Get a new `Proposal` using a manually-entered ID.
Input an ID, either for an album ("release") or a track ("recording").
""" """
prompt = u'Enter {0} ID:'.format(u'recording' if singleton else u'release') prompt = u'Enter {0} ID:'.format(u'release' if task.is_album
return input_(prompt).strip() else u'recording')
search_id = input_(prompt).strip()
if task.is_album:
_, _, prop = autotag.tag_album(
task.items, search_ids=search_id.split()
)
return prop
else:
return autotag.tag_item(task.item, search_ids=search_id.split())
def abort_action(session, task):
"""A prompt choice callback that aborts the importer.
"""
raise importer.ImportAbort()
class TerminalImportSession(importer.ImportSession): class TerminalImportSession(importer.ImportSession):
@ -724,42 +691,34 @@ class TerminalImportSession(importer.ImportSession):
return action return action
# Loop until we have a choice. # Loop until we have a choice.
candidates, rec = task.candidates, task.rec
while True: while True:
# Gather extra choices from plugins. # Ask for a choice from the user. The result of
extra_choices = self._get_plugin_choices(task) # `choose_candidate` may be an `importer.action`, an
extra_ops = {c.short: c.callback for c in extra_choices} # `AlbumMatch` object for a specific selection, or a
# `PromptChoice`.
# Ask for a choice from the user. choices = self._get_choices(task)
choice = choose_candidate( choice = choose_candidate(
candidates, False, rec, task.cur_artist, task.cur_album, task.candidates, False, task.rec, task.cur_artist,
itemcount=len(task.items), extra_choices=extra_choices task.cur_album, itemcount=len(task.items), choices=choices
) )
# Choose which tags to use. # Basic choices that require no more action here.
if choice in (importer.action.SKIP, importer.action.ASIS, if choice in (importer.action.SKIP, importer.action.ASIS):
importer.action.TRACKS, importer.action.ALBUMS):
# Pass selection to main control flow. # Pass selection to main control flow.
return choice return choice
elif choice is importer.action.MANUAL:
# Try again with manual search terms. # Plugin-provided choices. We invoke the associated callback
search_artist, search_album = manual_search(False) # function.
_, _, candidates, rec = autotag.tag_album( elif choice in choices:
task.items, search_artist, search_album post_choice = choice.callback(self, task)
)
elif choice is importer.action.MANUAL_ID:
# Try a manually-entered ID.
search_id = manual_id(False)
if search_id:
_, _, candidates, rec = autotag.tag_album(
task.items, search_ids=search_id.split()
)
elif choice in extra_ops.keys():
# Allow extra ops to automatically set the post-choice.
post_choice = extra_ops[choice](self, task)
if isinstance(post_choice, importer.action): if isinstance(post_choice, importer.action):
# MANUAL and MANUAL_ID have no effect, even if returned.
return post_choice return post_choice
elif isinstance(post_choice, autotag.Proposal):
# Use the new candidates and continue around the loop.
task.candidates = post_choice.candidates
task.rec = post_choice.recommendation
# Otherwise, we have a specific match selection.
else: else:
# We have a candidate! Finish tagging. Here, choice is an # We have a candidate! Finish tagging. Here, choice is an
# AlbumMatch object. # AlbumMatch object.
@ -771,7 +730,7 @@ class TerminalImportSession(importer.ImportSession):
either an action constant or a TrackMatch object. either an action constant or a TrackMatch object.
""" """
print_() print_()
print_(task.item.path) print_(displayable_path(task.item.path))
candidates, rec = task.candidates, task.rec candidates, rec = task.candidates, task.rec
# Take immediate action if appropriate. # Take immediate action if appropriate.
@ -784,34 +743,22 @@ class TerminalImportSession(importer.ImportSession):
return action return action
while True: while True:
extra_choices = self._get_plugin_choices(task)
extra_ops = {c.short: c.callback for c in extra_choices}
# Ask for a choice. # Ask for a choice.
choices = self._get_choices(task)
choice = choose_candidate(candidates, True, rec, item=task.item, choice = choose_candidate(candidates, True, rec, item=task.item,
extra_choices=extra_choices) choices=choices)
if choice in (importer.action.SKIP, importer.action.ASIS): if choice in (importer.action.SKIP, importer.action.ASIS):
return choice return choice
elif choice == importer.action.TRACKS:
assert False # TRACKS is only legal for albums. elif choice in choices:
elif choice == importer.action.MANUAL: post_choice = choice.callback(self, task)
# Continue in the loop with a new set of candidates.
search_artist, search_title = manual_search(True)
candidates, rec = autotag.tag_item(task.item, search_artist,
search_title)
elif choice == importer.action.MANUAL_ID:
# Ask for a track ID.
search_id = manual_id(True)
if search_id:
candidates, rec = autotag.tag_item(
task.item, search_ids=search_id.split())
elif choice in extra_ops.keys():
# Allow extra ops to automatically set the post-choice.
post_choice = extra_ops[choice](self, task)
if isinstance(post_choice, importer.action): if isinstance(post_choice, importer.action):
# MANUAL and MANUAL_ID have no effect, even if returned.
return post_choice return post_choice
elif isinstance(post_choice, autotag.Proposal):
candidates = post_choice.candidates
rec = post_choice.recommendation
else: else:
# Chose a candidate. # Chose a candidate.
assert isinstance(choice, autotag.TrackMatch) assert isinstance(choice, autotag.TrackMatch)
@ -821,7 +768,7 @@ class TerminalImportSession(importer.ImportSession):
"""Decide what to do when a new album or item seems similar to one """Decide what to do when a new album or item seems similar to one
that's already in the library. that's already in the library.
""" """
log.warn(u"This {0} is already in the library!", log.warning(u"This {0} is already in the library!",
(u"album" if task.is_album else u"item")) (u"album" if task.is_album else u"item"))
if config['import']['quiet']: if config['import']['quiet']:
@ -843,7 +790,7 @@ class TerminalImportSession(importer.ImportSession):
)) ))
sel = ui.input_options( sel = ui.input_options(
(u'Skip new', u'Keep both', u'Remove old') (u'Skip new', u'Keep both', u'Remove old', u'Merge all')
) )
if sel == u's': if sel == u's':
@ -855,6 +802,8 @@ class TerminalImportSession(importer.ImportSession):
elif sel == u'r': elif sel == u'r':
# Remove old. # Remove old.
task.should_remove_duplicates = True task.should_remove_duplicates = True
elif sel == u'm':
task.should_merge_duplicates = True
else: else:
assert False assert False
@ -863,8 +812,10 @@ class TerminalImportSession(importer.ImportSession):
u"was interrupted. Resume (Y/n)?" u"was interrupted. Resume (Y/n)?"
.format(displayable_path(path))) .format(displayable_path(path)))
def _get_plugin_choices(self, task): def _get_choices(self, task):
"""Get the extra choices appended to the plugins to the ui prompt. """Get the list of prompt choices that should be presented to the
user. This consists of both built-in choices and ones provided by
plugins.
The `before_choose_candidate` event is sent to the plugins, with The `before_choose_candidate` event is sent to the plugins, with
session and task as its parameters. Plugins are responsible for session and task as its parameters. Plugins are responsible for
@ -877,20 +828,37 @@ class TerminalImportSession(importer.ImportSession):
Returns a list of `PromptChoice`s. Returns a list of `PromptChoice`s.
""" """
# Standard, built-in choices.
choices = [
PromptChoice(u's', u'Skip',
lambda s, t: importer.action.SKIP),
PromptChoice(u'u', u'Use as-is',
lambda s, t: importer.action.ASIS)
]
if task.is_album:
choices += [
PromptChoice(u't', u'as Tracks',
lambda s, t: importer.action.TRACKS),
PromptChoice(u'g', u'Group albums',
lambda s, t: importer.action.ALBUMS),
]
choices += [
PromptChoice(u'e', u'Enter search', manual_search),
PromptChoice(u'i', u'enter Id', manual_id),
PromptChoice(u'b', u'aBort', abort_action),
]
# Send the before_choose_candidate event and flatten list. # Send the before_choose_candidate event and flatten list.
extra_choices = list(chain(*plugins.send('before_choose_candidate', extra_choices = list(chain(*plugins.send('before_choose_candidate',
session=self, task=task))) session=self, task=task)))
# Take into account default options, for duplicate checking.
all_choices = [PromptChoice(u'a', u'Apply', None),
PromptChoice(u's', u'Skip', None),
PromptChoice(u'u', u'Use as-is', None),
PromptChoice(u't', u'as Tracks', None),
PromptChoice(u'g', u'Group albums', None),
PromptChoice(u'e', u'Enter search', None),
PromptChoice(u'i', u'enter Id', None),
PromptChoice(u'b', u'aBort', None)] +\
extra_choices
# Add a "dummy" choice for the other baked-in option, for
# duplicate checking.
all_choices = [
PromptChoice(u'a', u'Apply', None),
] + choices + extra_choices
# Check for conflicts.
short_letters = [c.short for c in all_choices] short_letters = [c.short for c in all_choices]
if len(short_letters) != len(set(short_letters)): if len(short_letters) != len(set(short_letters)):
# Duplicate short letter has been found. # Duplicate short letter has been found.
@ -900,11 +868,12 @@ class TerminalImportSession(importer.ImportSession):
# Keep the first of the choices, removing the rest. # Keep the first of the choices, removing the rest.
dup_choices = [c for c in all_choices if c.short == short] dup_choices = [c for c in all_choices if c.short == short]
for c in dup_choices[1:]: for c in dup_choices[1:]:
log.warn(u"Prompt choice '{0}' removed due to conflict " log.warning(u"Prompt choice '{0}' removed due to conflict "
u"with '{1}' (short letter: '{2}')", u"with '{1}' (short letter: '{2}')",
c.long, dup_choices[0].long, c.short) c.long, dup_choices[0].long, c.short)
extra_choices.remove(c) extra_choices.remove(c)
return extra_choices
return choices + extra_choices
# The import command. # The import command.
@ -964,6 +933,13 @@ def import_func(lib, opts, args):
if not paths: if not paths:
raise ui.UserError(u'no path specified') raise ui.UserError(u'no path specified')
# On Python 2, we get filenames as raw bytes, which is what we
# need. On Python 3, we need to undo the "helpful" conversion to
# Unicode strings to get the real bytestring filename.
if not six.PY2:
paths = [p.encode(util.arg_encoding(), 'surrogateescape')
for p in paths]
import_files(lib, paths, query) import_files(lib, paths, query)
@ -978,6 +954,10 @@ import_cmd.parser.add_option(
u'-C', u'--nocopy', action='store_false', dest='copy', u'-C', u'--nocopy', action='store_false', dest='copy',
help=u"don't copy tracks (opposite of -c)" help=u"don't copy tracks (opposite of -c)"
) )
import_cmd.parser.add_option(
u'-m', u'--move', action='store_true', dest='move',
help=u"move tracks into the library (overrides -c)"
)
import_cmd.parser.add_option( import_cmd.parser.add_option(
u'-w', u'--write', action='store_true', default=None, u'-w', u'--write', action='store_true', default=None,
help=u"write new metadata to files' tags (default)" help=u"write new metadata to files' tags (default)"
@ -1030,6 +1010,10 @@ import_cmd.parser.add_option(
u'-I', u'--noincremental', dest='incremental', action='store_false', u'-I', u'--noincremental', dest='incremental', action='store_false',
help=u'do not skip already-imported directories' help=u'do not skip already-imported directories'
) )
import_cmd.parser.add_option(
u'--from-scratch', dest='from_scratch', action='store_true',
help=u'erase existing metadata before applying new metadata'
)
import_cmd.parser.add_option( import_cmd.parser.add_option(
u'--flat', dest='flat', action='store_true', u'--flat', dest='flat', action='store_true',
help=u'import an entire tree as a single album' help=u'import an entire tree as a single album'
@ -1044,16 +1028,22 @@ import_cmd.parser.add_option(
) )
import_cmd.parser.add_option( import_cmd.parser.add_option(
u'-S', u'--search-id', dest='search_ids', action='append', u'-S', u'--search-id', dest='search_ids', action='append',
metavar='BACKEND_ID', metavar='ID',
help=u'restrict matching to a specific metadata backend ID' help=u'restrict matching to a specific metadata backend ID'
) )
import_cmd.parser.add_option(
u'--set', dest='set_fields', action='callback',
callback=_store_dict,
metavar='FIELD=VALUE',
help=u'set the given fields to the supplied values'
)
import_cmd.func = import_func import_cmd.func = import_func
default_commands.append(import_cmd) default_commands.append(import_cmd)
# list: Query and show library contents. # list: Query and show library contents.
def list_items(lib, query, album, fmt=''): def list_items(lib, query, album, fmt=u''):
"""Print out items in lib matching query. If album, then search for """Print out items in lib matching query. If album, then search for
albums instead of single items. albums instead of single items.
""" """
@ -1079,11 +1069,18 @@ default_commands.append(list_cmd)
# update: Update library contents according to on-disk tags. # update: Update library contents according to on-disk tags.
def update_items(lib, query, album, move, pretend): def update_items(lib, query, album, move, pretend, fields):
"""For all the items matched by the query, update the library to """For all the items matched by the query, update the library to
reflect the item's embedded tags. reflect the item's embedded tags.
:param fields: The fields to be stored. If not specified, all fields will
be.
""" """
with lib.transaction(): with lib.transaction():
if move and fields is not None and 'path' not in fields:
# Special case: if an item needs to be moved, the path field has to
# updated; otherwise the new path will not be reflected in the
# database.
fields.append('path')
items, _ = _do_query(lib, query, album) items, _ = _do_query(lib, query, album)
# Walk through the items and pick up their changes. # Walk through the items and pick up their changes.
@ -1122,24 +1119,25 @@ def update_items(lib, query, album, move, pretend):
item._dirty.discard(u'albumartist') item._dirty.discard(u'albumartist')
# Check for and display changes. # Check for and display changes.
changed = ui.show_model_changes(item, changed = ui.show_model_changes(
fields=library.Item._media_fields) item,
fields=fields or library.Item._media_fields)
# Save changes. # Save changes.
if not pretend: if not pretend:
if changed: if changed:
# Move the item if it's in the library. # Move the item if it's in the library.
if move and lib.directory in ancestry(item.path): if move and lib.directory in ancestry(item.path):
item.move() item.move(store=False)
item.store() item.store(fields=fields)
affected_albums.add(item.album_id) affected_albums.add(item.album_id)
else: else:
# The file's mtime was different, but there were no # The file's mtime was different, but there were no
# changes to the metadata. Store the new mtime, # changes to the metadata. Store the new mtime,
# which is set in the call to read(), so we don't # which is set in the call to read(), so we don't
# check this again in the future. # check this again in the future.
item.store() item.store(fields=fields)
# Skip album changes while pretending. # Skip album changes while pretending.
if pretend: if pretend:
@ -1158,17 +1156,24 @@ def update_items(lib, query, album, move, pretend):
# Update album structure to reflect an item in it. # Update album structure to reflect an item in it.
for key in library.Album.item_keys: for key in library.Album.item_keys:
album[key] = first_item[key] album[key] = first_item[key]
album.store() album.store(fields=fields)
# Move album art (and any inconsistent items). # Move album art (and any inconsistent items).
if move and lib.directory in ancestry(first_item.path): if move and lib.directory in ancestry(first_item.path):
log.debug(u'moving album {0}', album_id) log.debug(u'moving album {0}', album_id)
album.move()
# Manually moving and storing the album.
items = list(album.items())
for item in items:
item.move(store=False)
item.store(fields=fields)
album.move(store=False)
album.store(fields=fields)
def update_func(lib, opts, args): def update_func(lib, opts, args):
update_items(lib, decargs(args), opts.album, ui.should_move(opts.move), update_items(lib, decargs(args), opts.album, ui.should_move(opts.move),
opts.pretend) opts.pretend, opts.fields)
update_cmd = ui.Subcommand( update_cmd = ui.Subcommand(
@ -1188,19 +1193,25 @@ update_cmd.parser.add_option(
u'-p', u'--pretend', action='store_true', u'-p', u'--pretend', action='store_true',
help=u"show all changes but do nothing" help=u"show all changes but do nothing"
) )
update_cmd.parser.add_option(
u'-F', u'--field', default=None, action='append', dest='fields',
help=u'list of fields to update'
)
update_cmd.func = update_func update_cmd.func = update_func
default_commands.append(update_cmd) default_commands.append(update_cmd)
# remove: Remove items from library, delete files. # remove: Remove items from library, delete files.
def remove_items(lib, query, album, delete): def remove_items(lib, query, album, delete, force):
"""Remove items matching query from lib. If album, then match and """Remove items matching query from lib. If album, then match and
remove whole albums. If delete, also remove files from disk. remove whole albums. If delete, also remove files from disk.
""" """
# Get the matching items. # Get the matching items.
items, albums = _do_query(lib, query, album) items, albums = _do_query(lib, query, album)
# Confirm file removal if not forcing removal.
if not force:
# Prepare confirmation with user. # Prepare confirmation with user.
print_() print_()
if delete: if delete:
@ -1208,7 +1219,7 @@ def remove_items(lib, query, album, delete):
prompt = u'Really DELETE %i file%s (y/n)?' % \ prompt = u'Really DELETE %i file%s (y/n)?' % \
(len(items), 's' if len(items) > 1 else '') (len(items), 's' if len(items) > 1 else '')
else: else:
fmt = '' fmt = u''
prompt = u'Really remove %i item%s from the library (y/n)?' % \ prompt = u'Really remove %i item%s from the library (y/n)?' % \
(len(items), 's' if len(items) > 1 else '') (len(items), 's' if len(items) > 1 else '')
@ -1227,7 +1238,7 @@ def remove_items(lib, query, album, delete):
def remove_func(lib, opts, args): def remove_func(lib, opts, args):
remove_items(lib, decargs(args), opts.album, opts.delete) remove_items(lib, decargs(args), opts.album, opts.delete, opts.force)
remove_cmd = ui.Subcommand( remove_cmd = ui.Subcommand(
@ -1237,6 +1248,10 @@ remove_cmd.parser.add_option(
u"-d", u"--delete", action="store_true", u"-d", u"--delete", action="store_true",
help=u"also remove files from disk" help=u"also remove files from disk"
) )
remove_cmd.parser.add_option(
u"-f", u"--force", action="store_true",
help=u"do not ask when removing items"
)
remove_cmd.parser.add_album_option() remove_cmd.parser.add_album_option()
remove_cmd.func = remove_func remove_cmd.func = remove_func
default_commands.append(remove_cmd) default_commands.append(remove_cmd)
@ -1310,6 +1325,7 @@ default_commands.append(stats_cmd)
def show_version(lib, opts, args): def show_version(lib, opts, args):
print_(u'beets version %s' % beets.__version__) print_(u'beets version %s' % beets.__version__)
print_(u'Python version {}'.format(python_version()))
# Show plugins. # Show plugins.
names = sorted(p.name for p in plugins.find_plugins()) names = sorted(p.name for p in plugins.find_plugins())
if names: if names:
@ -1454,7 +1470,8 @@ default_commands.append(modify_cmd)
# move: Move/copy files to the library or a new base directory. # move: Move/copy files to the library or a new base directory.
def move_items(lib, dest, query, copy, album, pretend, confirm=False): def move_items(lib, dest, query, copy, album, pretend, confirm=False,
export=False):
"""Moves or copies items to a new base directory, given by dest. If """Moves or copies items to a new base directory, given by dest. If
dest is None, then the library's base directory is used, making the dest is None, then the library's base directory is used, making the
command "consolidate" files. command "consolidate" files.
@ -1467,6 +1484,7 @@ def move_items(lib, dest, query, copy, album, pretend, confirm=False):
isalbummoved = lambda album: any(isitemmoved(i) for i in album.items()) isalbummoved = lambda album: any(isitemmoved(i) for i in album.items())
objs = [o for o in objs if (isalbummoved if album else isitemmoved)(o)] objs = [o for o in objs if (isalbummoved if album else isitemmoved)(o)]
copy = copy or export # Exporting always copies.
action = u'Copying' if copy else u'Moving' action = u'Copying' if copy else u'Moving'
act = u'copy' if copy else u'move' act = u'copy' if copy else u'move'
entity = u'album' if album else u'item' entity = u'album' if album else u'item'
@ -1492,8 +1510,16 @@ def move_items(lib, dest, query, copy, album, pretend, confirm=False):
for obj in objs: for obj in objs:
log.debug(u'moving: {0}', util.displayable_path(obj.path)) log.debug(u'moving: {0}', util.displayable_path(obj.path))
obj.move(copy, basedir=dest) if export:
obj.store() # Copy without affecting the database.
obj.move(operation=MoveOperation.COPY, basedir=dest,
store=False)
else:
# Ordinary move/copy: store the new path.
if copy:
obj.move(operation=MoveOperation.COPY, basedir=dest)
else:
obj.move(operation=MoveOperation.MOVE, basedir=dest)
def move_func(lib, opts, args): def move_func(lib, opts, args):
@ -1504,7 +1530,7 @@ def move_func(lib, opts, args):
raise ui.UserError(u'no such directory: %s' % dest) raise ui.UserError(u'no such directory: %s' % dest)
move_items(lib, dest, decargs(args), opts.copy, opts.album, opts.pretend, move_items(lib, dest, decargs(args), opts.copy, opts.album, opts.pretend,
opts.timid) opts.timid, opts.export)
move_cmd = ui.Subcommand( move_cmd = ui.Subcommand(
@ -1526,6 +1552,10 @@ move_cmd.parser.add_option(
u'-t', u'--timid', dest='timid', action='store_true', u'-t', u'--timid', dest='timid', action='store_true',
help=u'always confirm all actions' help=u'always confirm all actions'
) )
move_cmd.parser.add_option(
u'-e', u'--export', default=False, action='store_true',
help=u'copy without changing the database path'
)
move_cmd.parser.add_album_option() move_cmd.parser.add_album_option()
move_cmd.func = move_func move_cmd.func = move_func
default_commands.append(move_cmd) default_commands.append(move_cmd)
@ -1601,7 +1631,7 @@ def config_func(lib, opts, args):
filenames.insert(0, user_path) filenames.insert(0, user_path)
for filename in filenames: for filename in filenames:
print_(filename) print_(displayable_path(filename))
# Open in editor. # Open in editor.
elif opts.edit: elif opts.edit:
@ -1609,7 +1639,8 @@ def config_func(lib, opts, args):
# Dump configuration. # Dump configuration.
else: else:
print_(config.dump(full=opts.defaults, redact=opts.redact)) config_out = config.dump(full=opts.defaults, redact=opts.redact)
print_(util.text_string(config_out))
def config_edit(): def config_edit():
@ -1655,17 +1686,19 @@ default_commands.append(config_cmd)
def print_completion(*args): def print_completion(*args):
for line in completion_script(default_commands + plugins.commands()): for line in completion_script(default_commands + plugins.commands()):
print_(line, end='') print_(line, end=u'')
if not any(map(os.path.isfile, BASH_COMPLETION_PATHS)): if not any(map(os.path.isfile, BASH_COMPLETION_PATHS)):
log.warn(u'Warning: Unable to find the bash-completion package. ' log.warning(u'Warning: Unable to find the bash-completion package. '
u'Command line completion might not work.') u'Command line completion might not work.')
BASH_COMPLETION_PATHS = map(syspath, [ BASH_COMPLETION_PATHS = map(syspath, [
u'/etc/bash_completion', u'/etc/bash_completion',
u'/usr/share/bash-completion/bash_completion', u'/usr/share/bash-completion/bash_completion',
u'/usr/share/local/bash-completion/bash_completion', u'/usr/local/share/bash-completion/bash_completion',
u'/opt/local/share/bash-completion/bash_completion', # SmartOS # SmartOS
u'/usr/local/etc/bash_completion', # Homebrew u'/opt/local/share/bash-completion/bash_completion',
# Homebrew (before bash-completion2)
u'/usr/local/etc/bash_completion',
]) ])
@ -1677,7 +1710,7 @@ def completion_script(commands):
""" """
base_script = os.path.join(_package_path('beets.ui'), 'completion_base.sh') base_script = os.path.join(_package_path('beets.ui'), 'completion_base.sh')
with open(base_script, 'r') as base_script: with open(base_script, 'r') as base_script:
yield base_script.read() yield util.text_string(base_script.read())
options = {} options = {}
aliases = {} aliases = {}
@ -1692,12 +1725,12 @@ def completion_script(commands):
if re.match(r'^\w+$', alias): if re.match(r'^\w+$', alias):
aliases[alias] = name aliases[alias] = name
options[name] = {'flags': [], 'opts': []} options[name] = {u'flags': [], u'opts': []}
for opts in cmd.parser._get_all_options()[1:]: for opts in cmd.parser._get_all_options()[1:]:
if opts.action in ('store_true', 'store_false'): if opts.action in ('store_true', 'store_false'):
option_type = 'flags' option_type = u'flags'
else: else:
option_type = 'opts' option_type = u'opts'
options[name][option_type].extend( options[name][option_type].extend(
opts._short_opts + opts._long_opts opts._short_opts + opts._long_opts
@ -1705,14 +1738,14 @@ def completion_script(commands):
# Add global options # Add global options
options['_global'] = { options['_global'] = {
'flags': [u'-v', u'--verbose'], u'flags': [u'-v', u'--verbose'],
'opts': u'-l --library -c --config -d --directory -h --help'.split( u'opts':
u' ') u'-l --library -c --config -d --directory -h --help'.split(u' ')
} }
# Add flags common to all commands # Add flags common to all commands
options['_common'] = { options['_common'] = {
'flags': [u'-h', u'--help'] u'flags': [u'-h', u'--help']
} }
# Start generating the script # Start generating the script
@ -1725,21 +1758,24 @@ def completion_script(commands):
# Command aliases # Command aliases
yield u" local aliases='%s'\n" % ' '.join(aliases.keys()) yield u" local aliases='%s'\n" % ' '.join(aliases.keys())
for alias, cmd in aliases.items(): for alias, cmd in aliases.items():
yield u" local alias__%s=%s\n" % (alias, cmd) yield u" local alias__%s=%s\n" % (alias.replace('-', '_'), cmd)
yield u'\n' yield u'\n'
# Fields # Fields
yield u" fields='%s'\n" % ' '.join( yield u" fields='%s'\n" % ' '.join(
set(library.Item._fields.keys() + library.Album._fields.keys()) set(
list(library.Item._fields.keys()) +
list(library.Album._fields.keys())
)
) )
# Command options # Command options
for cmd, opts in options.items(): for cmd, opts in options.items():
for option_type, option_list in opts.items(): for option_type, option_list in opts.items():
if option_list: if option_list:
option_list = ' '.join(option_list) option_list = u' '.join(option_list)
yield u" local %s__%s='%s'\n" % ( yield u" local %s__%s='%s'\n" % (
option_type, cmd, option_list) option_type, cmd.replace('-', '_'), option_list)
yield u' _beet_dispatch\n' yield u' _beet_dispatch\n'
yield u'}\n' yield u'}\n'

View file

@ -70,7 +70,7 @@ _beet_dispatch() {
# Replace command shortcuts # Replace command shortcuts
if [[ -n $cmd ]] && _list_include_item "$aliases" "$cmd"; then if [[ -n $cmd ]] && _list_include_item "$aliases" "$cmd"; then
eval "cmd=\$alias__$cmd" eval "cmd=\$alias__${cmd//-/_}"
fi fi
case $cmd in case $cmd in
@ -94,8 +94,8 @@ _beet_dispatch() {
_beet_complete() { _beet_complete() {
if [[ $cur == -* ]]; then if [[ $cur == -* ]]; then
local opts flags completions local opts flags completions
eval "opts=\$opts__$cmd" eval "opts=\$opts__${cmd//-/_}"
eval "flags=\$flags__$cmd" eval "flags=\$flags__${cmd//-/_}"
completions="${flags___common} ${opts} ${flags}" completions="${flags___common} ${opts} ${flags}"
COMPREPLY+=( $(compgen -W "$completions" -- $cur) ) COMPREPLY+=( $(compgen -W "$completions" -- $cur) )
else else
@ -129,7 +129,7 @@ _beet_complete_global() {
COMPREPLY+=( $(compgen -W "$completions" -- $cur) ) COMPREPLY+=( $(compgen -W "$completions" -- $cur) )
elif [[ -n $cur ]] && _list_include_item "$aliases" "$cur"; then elif [[ -n $cur ]] && _list_include_item "$aliases" "$cur"; then
local cmd local cmd
eval "cmd=\$alias__$cur" eval "cmd=\$alias__${cur//-/_}"
COMPREPLY+=( "$cmd" ) COMPREPLY+=( "$cmd" )
else else
COMPREPLY+=( $(compgen -W "$commands" -- $cur) ) COMPREPLY+=( $(compgen -W "$commands" -- $cur) )
@ -138,7 +138,7 @@ _beet_complete_global() {
_beet_complete_query() { _beet_complete_query() {
local opts local opts
eval "opts=\$opts__$cmd" eval "opts=\$opts__${cmd//-/_}"
if [[ $cur == -* ]] || _list_include_item "$opts" "$prev"; then if [[ $cur == -* ]] || _list_include_item "$opts" "$prev"; then
_beet_complete _beet_complete

View file

@ -18,6 +18,8 @@
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
import os import os
import sys import sys
import errno
import locale
import re import re
import shutil import shutil
import fnmatch import fnmatch
@ -27,10 +29,14 @@ import subprocess
import platform import platform
import shlex import shlex
from beets.util import hidden from beets.util import hidden
import six
from unidecode import unidecode
from enum import Enum
MAX_FILENAME_LENGTH = 200 MAX_FILENAME_LENGTH = 200
WINDOWS_MAGIC_PREFIX = u'\\\\?\\' WINDOWS_MAGIC_PREFIX = u'\\\\?\\'
SNI_SUPPORTED = sys.version_info >= (2, 7, 9)
class HumanReadableException(Exception): class HumanReadableException(Exception):
@ -65,14 +71,14 @@ class HumanReadableException(Exception):
def _reasonstr(self): def _reasonstr(self):
"""Get the reason as a string.""" """Get the reason as a string."""
if isinstance(self.reason, unicode): if isinstance(self.reason, six.text_type):
return self.reason return self.reason
elif isinstance(self.reason, basestring): # Byte string. elif isinstance(self.reason, bytes):
return self.reason.decode('utf8', 'ignore') return self.reason.decode('utf-8', 'ignore')
elif hasattr(self.reason, 'strerror'): # i.e., EnvironmentError elif hasattr(self.reason, 'strerror'): # i.e., EnvironmentError
return self.reason.strerror return self.reason.strerror
else: else:
return u'"{0}"'.format(unicode(self.reason)) return u'"{0}"'.format(six.text_type(self.reason))
def get_message(self): def get_message(self):
"""Create the human-readable description of the error, sans """Create the human-readable description of the error, sans
@ -119,6 +125,15 @@ class FilesystemError(HumanReadableException):
return u'{0} {1}'.format(self._reasonstr(), clause) return u'{0} {1}'.format(self._reasonstr(), clause)
class MoveOperation(Enum):
"""The file operations that e.g. various move functions can carry out.
"""
MOVE = 0
COPY = 1
LINK = 2
HARDLINK = 3
def normpath(path): def normpath(path):
"""Provide the canonical form of the path suitable for storing in """Provide the canonical form of the path suitable for storing in
the database. the database.
@ -158,15 +173,16 @@ def sorted_walk(path, ignore=(), ignore_hidden=False, logger=None):
pattern in `ignore` are skipped. If `logger` is provided, then pattern in `ignore` are skipped. If `logger` is provided, then
warning messages are logged there when a directory cannot be listed. warning messages are logged there when a directory cannot be listed.
""" """
# Make sure the path isn't a Unicode string. # Make sure the pathes aren't Unicode strings.
path = bytestring_path(path) path = bytestring_path(path)
ignore = [bytestring_path(i) for i in ignore]
# Get all the directories and files at this level. # Get all the directories and files at this level.
try: try:
contents = os.listdir(syspath(path)) contents = os.listdir(syspath(path))
except OSError as exc: except OSError as exc:
if logger: if logger:
logger.warn(u'could not list directory {0}: {1}'.format( logger.warning(u'could not list directory {0}: {1}'.format(
displayable_path(path), exc.strerror displayable_path(path), exc.strerror
)) ))
return return
@ -264,7 +280,9 @@ def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
if not os.path.exists(directory): if not os.path.exists(directory):
# Directory gone already. # Directory gone already.
continue continue
if fnmatch_all(os.listdir(directory), clutter): clutter = [bytestring_path(c) for c in clutter]
match_paths = [bytestring_path(d) for d in os.listdir(directory)]
if fnmatch_all(match_paths, clutter):
# Directory contains only clutter (or nothing). # Directory contains only clutter (or nothing).
try: try:
shutil.rmtree(directory) shutil.rmtree(directory)
@ -298,6 +316,18 @@ def components(path):
return comps return comps
def arg_encoding():
"""Get the encoding for command-line arguments (and other OS
locale-sensitive strings).
"""
try:
return locale.getdefaultlocale()[1] or 'utf-8'
except ValueError:
# Invalid locale environment variable setting. To avoid
# failing entirely for no good reason, assume UTF-8.
return 'utf-8'
def _fsencoding(): def _fsencoding():
"""Get the system's filesystem encoding. On Windows, this is always """Get the system's filesystem encoding. On Windows, this is always
UTF-8 (not MBCS). UTF-8 (not MBCS).
@ -309,7 +339,7 @@ def _fsencoding():
# for Windows paths, so the encoding is actually immaterial so # for Windows paths, so the encoding is actually immaterial so
# we can avoid dealing with this nastiness. We arbitrarily # we can avoid dealing with this nastiness. We arbitrarily
# choose UTF-8. # choose UTF-8.
encoding = 'utf8' encoding = 'utf-8'
return encoding return encoding
@ -327,11 +357,14 @@ def bytestring_path(path):
if os.path.__name__ == 'ntpath' and path.startswith(WINDOWS_MAGIC_PREFIX): if os.path.__name__ == 'ntpath' and path.startswith(WINDOWS_MAGIC_PREFIX):
path = path[len(WINDOWS_MAGIC_PREFIX):] path = path[len(WINDOWS_MAGIC_PREFIX):]
# Try to encode with default encodings, but fall back to UTF8. # Try to encode with default encodings, but fall back to utf-8.
try: try:
return path.encode(_fsencoding()) return path.encode(_fsencoding())
except (UnicodeError, LookupError): except (UnicodeError, LookupError):
return path.encode('utf8') return path.encode('utf-8')
PATH_SEP = bytestring_path(os.sep)
def displayable_path(path, separator=u'; '): def displayable_path(path, separator=u'; '):
@ -341,16 +374,16 @@ def displayable_path(path, separator=u'; '):
""" """
if isinstance(path, (list, tuple)): if isinstance(path, (list, tuple)):
return separator.join(displayable_path(p) for p in path) return separator.join(displayable_path(p) for p in path)
elif isinstance(path, unicode): elif isinstance(path, six.text_type):
return path return path
elif not isinstance(path, bytes): elif not isinstance(path, bytes):
# A non-string object: just get its unicode representation. # A non-string object: just get its unicode representation.
return unicode(path) return six.text_type(path)
try: try:
return path.decode(_fsencoding(), 'ignore') return path.decode(_fsencoding(), 'ignore')
except (UnicodeError, LookupError): except (UnicodeError, LookupError):
return path.decode('utf8', 'ignore') return path.decode('utf-8', 'ignore')
def syspath(path, prefix=True): def syspath(path, prefix=True):
@ -364,12 +397,12 @@ def syspath(path, prefix=True):
if os.path.__name__ != 'ntpath': if os.path.__name__ != 'ntpath':
return path return path
if not isinstance(path, unicode): if not isinstance(path, six.text_type):
# Beets currently represents Windows paths internally with UTF-8 # Beets currently represents Windows paths internally with UTF-8
# arbitrarily. But earlier versions used MBCS because it is # arbitrarily. But earlier versions used MBCS because it is
# reported as the FS encoding by Windows. Try both. # reported as the FS encoding by Windows. Try both.
try: try:
path = path.decode('utf8') path = path.decode('utf-8')
except UnicodeError: except UnicodeError:
# The encoding should always be MBCS, Windows' broken # The encoding should always be MBCS, Windows' broken
# Unicode representation. # Unicode representation.
@ -389,6 +422,8 @@ def syspath(path, prefix=True):
def samefile(p1, p2): def samefile(p1, p2):
"""Safer equality for paths.""" """Safer equality for paths."""
if p1 == p2:
return True
return shutil._samefile(syspath(p1), syspath(p2)) return shutil._samefile(syspath(p1), syspath(p2))
@ -437,8 +472,7 @@ def move(path, dest, replace=False):
path = syspath(path) path = syspath(path)
dest = syspath(dest) dest = syspath(dest)
if os.path.exists(dest) and not replace: if os.path.exists(dest) and not replace:
raise FilesystemError(u'file exists', 'rename', (path, dest), raise FilesystemError(u'file exists', 'rename', (path, dest))
traceback.format_exc())
# First, try renaming the file. # First, try renaming the file.
try: try:
@ -456,20 +490,49 @@ def move(path, dest, replace=False):
def link(path, dest, replace=False): def link(path, dest, replace=False):
"""Create a symbolic link from path to `dest`. Raises an OSError if """Create a symbolic link from path to `dest`. Raises an OSError if
`dest` already exists, unless `replace` is True. Does nothing if `dest` already exists, unless `replace` is True. Does nothing if
`path` == `dest`.""" `path` == `dest`.
if (samefile(path, dest)): """
if samefile(path, dest):
return return
path = syspath(path) if os.path.exists(syspath(dest)) and not replace:
dest = syspath(dest) raise FilesystemError(u'file exists', 'rename', (path, dest))
if os.path.exists(dest) and not replace:
raise FilesystemError(u'file exists', 'rename', (path, dest),
traceback.format_exc())
try: try:
os.symlink(path, dest) os.symlink(syspath(path), syspath(dest))
except OSError: except NotImplementedError:
raise FilesystemError(u'Operating system does not support symbolic ' # raised on python >= 3.2 and Windows versions before Vista
u'links.', 'link', (path, dest), raise FilesystemError(u'OS does not support symbolic links.'
'link', (path, dest), traceback.format_exc())
except OSError as exc:
# TODO: Windows version checks can be removed for python 3
if hasattr('sys', 'getwindowsversion'):
if sys.getwindowsversion()[0] < 6: # is before Vista
exc = u'OS does not support symbolic links.'
raise FilesystemError(exc, 'link', (path, dest),
traceback.format_exc())
def hardlink(path, dest, replace=False):
"""Create a hard link from path to `dest`. Raises an OSError if
`dest` already exists, unless `replace` is True. Does nothing if
`path` == `dest`.
"""
if samefile(path, dest):
return
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError(u'file exists', 'rename', (path, dest))
try:
os.link(syspath(path), syspath(dest))
except NotImplementedError:
raise FilesystemError(u'OS does not support hard links.'
'link', (path, dest), traceback.format_exc())
except OSError as exc:
if exc.errno == errno.EXDEV:
raise FilesystemError(u'Cannot hard link across devices.'
'link', (path, dest), traceback.format_exc())
else:
raise FilesystemError(exc, 'link', (path, dest),
traceback.format_exc()) traceback.format_exc())
@ -490,7 +553,8 @@ def unique_path(path):
num = 0 num = 0
while True: while True:
num += 1 num += 1
new_path = b'%s.%i%s' % (base, num, ext) suffix = u'.{}'.format(num).encode() + ext
new_path = base + suffix
if not os.path.exists(new_path): if not os.path.exists(new_path):
return new_path return new_path
@ -594,7 +658,7 @@ def legalize_path(path, replacements, length, extension, fragment):
if fragment: if fragment:
# Outputting Unicode. # Outputting Unicode.
extension = extension.decode('utf8', 'ignore') extension = extension.decode('utf-8', 'ignore')
first_stage_path, _ = _legalize_stage( first_stage_path, _ = _legalize_stage(
path, replacements, length, extension, fragment path, replacements, length, extension, fragment
@ -618,6 +682,24 @@ def legalize_path(path, replacements, length, extension, fragment):
return second_stage_path, retruncated return second_stage_path, retruncated
def py3_path(path):
"""Convert a bytestring path to Unicode on Python 3 only. On Python
2, return the bytestring path unchanged.
This helps deal with APIs on Python 3 that *only* accept Unicode
(i.e., `str` objects). I philosophically disagree with this
decision, because paths are sadly bytes on Unix, but that's the way
it is. So this function helps us "smuggle" the true bytes data
through APIs that took Python 3's Unicode mandate too seriously.
"""
if isinstance(path, six.text_type):
return path
assert isinstance(path, bytes)
if six.PY2:
return path
return os.fsdecode(path)
def str2bool(value): def str2bool(value):
"""Returns a boolean reflecting a human-entered string.""" """Returns a boolean reflecting a human-entered string."""
return value.lower() in (u'yes', u'1', u'true', u't', u'y') return value.lower() in (u'yes', u'1', u'true', u't', u'y')
@ -627,14 +709,32 @@ def as_string(value):
"""Convert a value to a Unicode object for matching with a query. """Convert a value to a Unicode object for matching with a query.
None becomes the empty string. Bytestrings are silently decoded. None becomes the empty string. Bytestrings are silently decoded.
""" """
if six.PY2:
buffer_types = buffer, memoryview # noqa: F821
else:
buffer_types = memoryview
if value is None: if value is None:
return u'' return u''
elif isinstance(value, buffer): elif isinstance(value, buffer_types):
return bytes(value).decode('utf8', 'ignore') return bytes(value).decode('utf-8', 'ignore')
elif isinstance(value, bytes): elif isinstance(value, bytes):
return value.decode('utf8', 'ignore') return value.decode('utf-8', 'ignore')
else: else:
return unicode(value) return six.text_type(value)
def text_string(value, encoding='utf-8'):
"""Convert a string, which can either be bytes or unicode, to
unicode.
Text (unicode) is left untouched; bytes are decoded. This is useful
to convert from a "native string" (bytes on Python 2, str on Python
3) to a consistently unicode value.
"""
if isinstance(value, bytes):
return value.decode(encoding)
return value
def plurality(objs): def plurality(objs):
@ -661,7 +761,7 @@ def cpu_count():
num = 0 num = 0
elif sys.platform == 'darwin': elif sys.platform == 'darwin':
try: try:
num = int(command_output([b'/usr/sbin/sysctl', b'-n', b'hw.ncpu'])) num = int(command_output(['/usr/sbin/sysctl', '-n', 'hw.ncpu']))
except (ValueError, OSError, subprocess.CalledProcessError): except (ValueError, OSError, subprocess.CalledProcessError):
num = 0 num = 0
else: else:
@ -675,10 +775,28 @@ def cpu_count():
return 1 return 1
def convert_command_args(args):
"""Convert command arguments to bytestrings on Python 2 and
surrogate-escaped strings on Python 3."""
assert isinstance(args, list)
def convert(arg):
if six.PY2:
if isinstance(arg, six.text_type):
arg = arg.encode(arg_encoding())
else:
if isinstance(arg, bytes):
arg = arg.decode(arg_encoding(), 'surrogateescape')
return arg
return [convert(a) for a in args]
def command_output(cmd, shell=False): def command_output(cmd, shell=False):
"""Runs the command and returns its output after it has exited. """Runs the command and returns its output after it has exited.
``cmd`` is a list of byte string arguments starting with the command names. ``cmd`` is a list of arguments starting with the command names. The
arguments are bytes on Unix and strings on Windows.
If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a
shell to execute. shell to execute.
@ -689,10 +807,18 @@ def command_output(cmd, shell=False):
This replaces `subprocess.check_output` which can have problems if lots of This replaces `subprocess.check_output` which can have problems if lots of
output is sent to stderr. output is sent to stderr.
""" """
cmd = convert_command_args(cmd)
try: # python >= 3.3
devnull = subprocess.DEVNULL
except AttributeError:
devnull = open(os.devnull, 'r+b')
proc = subprocess.Popen( proc = subprocess.Popen(
cmd, cmd,
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=devnull,
close_fds=platform.system() != 'Windows', close_fds=platform.system() != 'Windows',
shell=shell shell=shell
) )
@ -700,7 +826,7 @@ def command_output(cmd, shell=False):
if proc.returncode: if proc.returncode:
raise subprocess.CalledProcessError( raise subprocess.CalledProcessError(
returncode=proc.returncode, returncode=proc.returncode,
cmd=b' '.join(cmd), cmd=' '.join(cmd),
output=stdout + stderr, output=stdout + stderr,
) )
return stdout return stdout
@ -756,15 +882,14 @@ def shlex_split(s):
Raise `ValueError` if the string is not a well-formed shell string. Raise `ValueError` if the string is not a well-formed shell string.
This is a workaround for a bug in some versions of Python. This is a workaround for a bug in some versions of Python.
""" """
if isinstance(s, bytes): if not six.PY2 or isinstance(s, bytes): # Shlex works fine.
# Shlex works fine.
return shlex.split(s) return shlex.split(s)
elif isinstance(s, unicode): elif isinstance(s, six.text_type):
# Work around a Python bug. # Work around a Python bug.
# http://bugs.python.org/issue6988 # http://bugs.python.org/issue6988
bs = s.encode('utf8') bs = s.encode('utf-8')
return [c.decode('utf8') for c in shlex.split(bs)] return [c.decode('utf-8') for c in shlex.split(bs)]
else: else:
raise TypeError(u'shlex_split called with non-string') raise TypeError(u'shlex_split called with non-string')
@ -796,8 +921,8 @@ def _windows_long_path_name(short_path):
"""Use Windows' `GetLongPathNameW` via ctypes to get the canonical, """Use Windows' `GetLongPathNameW` via ctypes to get the canonical,
long path given a short filename. long path given a short filename.
""" """
if not isinstance(short_path, unicode): if not isinstance(short_path, six.text_type):
short_path = unicode(short_path) short_path = short_path.decode(_fsencoding())
import ctypes import ctypes
buf = ctypes.create_unicode_buffer(260) buf = ctypes.create_unicode_buffer(260)
@ -860,3 +985,27 @@ def raw_seconds_short(string):
raise ValueError(u'String not in M:SS format') raise ValueError(u'String not in M:SS format')
minutes, seconds = map(int, match.groups()) minutes, seconds = map(int, match.groups())
return float(minutes * 60 + seconds) return float(minutes * 60 + seconds)
def asciify_path(path, sep_replace):
"""Decodes all unicode characters in a path into ASCII equivalents.
Substitutions are provided by the unidecode module. Path separators in the
input are preserved.
Keyword arguments:
path -- The path to be asciified.
sep_replace -- the string to be used to replace extraneous path separators.
"""
# if this platform has an os.altsep, change it to os.sep.
if os.altsep:
path = path.replace(os.altsep, os.sep)
path_components = path.split(os.sep)
for index, item in enumerate(path_components):
path_components[index] = unidecode(item).replace(os.sep, sep_replace)
if os.altsep:
path_components[index] = unidecode(item).replace(
os.altsep,
sep_replace
)
return os.sep.join(path_components)

View file

@ -18,20 +18,23 @@ public resizing proxy if neither is available.
""" """
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
import urllib
import subprocess import subprocess
import os import os
import re import re
from tempfile import NamedTemporaryFile from tempfile import NamedTemporaryFile
from six.moves.urllib.parse import urlencode
from beets import logging from beets import logging
from beets import util from beets import util
import six
# Resizing methods # Resizing methods
PIL = 1 PIL = 1
IMAGEMAGICK = 2 IMAGEMAGICK = 2
WEBPROXY = 3 WEBPROXY = 3
if util.SNI_SUPPORTED:
PROXY_URL = 'https://images.weserv.nl/'
else:
PROXY_URL = 'http://images.weserv.nl/' PROXY_URL = 'http://images.weserv.nl/'
log = logging.getLogger('beets') log = logging.getLogger('beets')
@ -41,9 +44,9 @@ def resize_url(url, maxwidth):
"""Return a proxied image URL that resizes the original image to """Return a proxied image URL that resizes the original image to
maxwidth (preserving aspect ratio). maxwidth (preserving aspect ratio).
""" """
return '{0}?{1}'.format(PROXY_URL, urllib.urlencode({ return '{0}?{1}'.format(PROXY_URL, urlencode({
'url': url.replace('http://', ''), 'url': url.replace('http://', ''),
'w': bytes(maxwidth), 'w': maxwidth,
})) }))
@ -52,8 +55,8 @@ def temp_file_for(path):
specified path. specified path.
""" """
ext = os.path.splitext(path)[1] ext = os.path.splitext(path)[1]
with NamedTemporaryFile(suffix=ext, delete=False) as f: with NamedTemporaryFile(suffix=util.py3_path(ext), delete=False) as f:
return f.name return util.bytestring_path(f.name)
def pil_resize(maxwidth, path_in, path_out=None): def pil_resize(maxwidth, path_in, path_out=None):
@ -85,18 +88,17 @@ def im_resize(maxwidth, path_in, path_out=None):
log.debug(u'artresizer: ImageMagick resizing {0} to {1}', log.debug(u'artresizer: ImageMagick resizing {0} to {1}',
util.displayable_path(path_in), util.displayable_path(path_out)) util.displayable_path(path_in), util.displayable_path(path_out))
# "-resize widthxheight>" shrinks images with dimension(s) larger # "-resize WIDTHx>" shrinks images with the width larger
# than the corresponding width and/or height dimension(s). The > # than the given width while maintaining the aspect ratio
# "only shrink" flag is prefixed by ^ escape char for Windows # with regards to the height.
# compatibility.
try: try:
util.command_output([ util.command_output([
b'convert', util.syspath(path_in, prefix=False), 'convert', util.syspath(path_in, prefix=False),
b'-resize', b'{0}x^>'.format(maxwidth), '-resize', '{0}x>'.format(maxwidth),
util.syspath(path_out, prefix=False), util.syspath(path_out, prefix=False),
]) ])
except subprocess.CalledProcessError: except subprocess.CalledProcessError:
log.warn(u'artresizer: IM convert failed for {0}', log.warning(u'artresizer: IM convert failed for {0}',
util.displayable_path(path_in)) util.displayable_path(path_in))
return path_in return path_in
return path_out return path_out
@ -119,12 +121,12 @@ def pil_getsize(path_in):
def im_getsize(path_in): def im_getsize(path_in):
cmd = [b'identify', b'-format', b'%w %h', cmd = ['identify', '-format', '%w %h',
util.syspath(path_in, prefix=False)] util.syspath(path_in, prefix=False)]
try: try:
out = util.command_output(cmd) out = util.command_output(cmd)
except subprocess.CalledProcessError as exc: except subprocess.CalledProcessError as exc:
log.warn(u'ImageMagick size query failed') log.warning(u'ImageMagick size query failed')
log.debug( log.debug(
u'`convert` exited with (status {}) when ' u'`convert` exited with (status {}) when '
u'getting size with command {}:\n{}', u'getting size with command {}:\n{}',
@ -134,7 +136,7 @@ def im_getsize(path_in):
try: try:
return tuple(map(int, out.split(b' '))) return tuple(map(int, out.split(b' ')))
except IndexError: except IndexError:
log.warn(u'Could not understand IM output: {0!r}', out) log.warning(u'Could not understand IM output: {0!r}', out)
BACKEND_GET_SIZE = { BACKEND_GET_SIZE = {
@ -149,21 +151,20 @@ class Shareable(type):
lazily-created shared instance of ``MyClass`` while calling lazily-created shared instance of ``MyClass`` while calling
``MyClass()`` to construct a new object works as usual. ``MyClass()`` to construct a new object works as usual.
""" """
def __init__(self, name, bases, dict): def __init__(cls, name, bases, dict):
super(Shareable, self).__init__(name, bases, dict) super(Shareable, cls).__init__(name, bases, dict)
self._instance = None cls._instance = None
@property @property
def shared(self): def shared(cls):
if self._instance is None: if cls._instance is None:
self._instance = self() cls._instance = cls()
return self._instance return cls._instance
class ArtResizer(object): class ArtResizer(six.with_metaclass(Shareable, object)):
"""A singleton class that performs image resizes. """A singleton class that performs image resizes.
""" """
__metaclass__ = Shareable
def __init__(self): def __init__(self):
"""Create a resizer object with an inferred method. """Create a resizer object with an inferred method.
@ -231,12 +232,13 @@ class ArtResizer(object):
def get_im_version(): def get_im_version():
"""Return Image Magick version or None if it is unavailable """Return Image Magick version or None if it is unavailable
Try invoking ImageMagick's "convert".""" Try invoking ImageMagick's "convert".
"""
try: try:
out = util.command_output([b'identify', b'--version']) out = util.command_output(['convert', '--version'])
if 'imagemagick' in out.lower(): if b'imagemagick' in out.lower():
pattern = r".+ (\d+)\.(\d+)\.(\d+).*" pattern = br".+ (\d+)\.(\d+)\.(\d+).*"
match = re.search(pattern, out) match = re.search(pattern, out)
if match: if match:
return (int(match.group(1)), return (int(match.group(1)),
@ -244,7 +246,8 @@ def get_im_version():
int(match.group(3))) int(match.group(3)))
return (0,) return (0,)
except (subprocess.CalledProcessError, OSError): except (subprocess.CalledProcessError, OSError) as exc:
log.debug(u'ImageMagick check `convert --version` failed: {}', exc)
return None return None

View file

@ -9,6 +9,7 @@ Bluelet: easy concurrency without all the messy parallelism.
""" """
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
import six
import socket import socket
import select import select
import sys import sys
@ -19,20 +20,6 @@ import time
import collections import collections
# A little bit of "six" (Python 2/3 compatibility): cope with PEP 3109 syntax
# changes.
PY3 = sys.version_info[0] == 3
if PY3:
def _reraise(typ, exc, tb):
raise exc.with_traceback(tb)
else:
exec("""
def _reraise(typ, exc, tb):
raise typ, exc, tb
""")
# Basic events used for thread scheduling. # Basic events used for thread scheduling.
class Event(object): class Event(object):
@ -214,7 +201,7 @@ class ThreadException(Exception):
self.exc_info = exc_info self.exc_info = exc_info
def reraise(self): def reraise(self):
_reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2]) six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
SUSPENDED = Event() # Special sentinel placeholder for suspended threads. SUSPENDED = Event() # Special sentinel placeholder for suspended threads.
@ -282,7 +269,7 @@ def run(root_coro):
except StopIteration: except StopIteration:
# Thread is done. # Thread is done.
complete_thread(coro, None) complete_thread(coro, None)
except: except BaseException:
# Thread raised some other exception. # Thread raised some other exception.
del threads[coro] del threads[coro]
raise ThreadException(coro, sys.exc_info()) raise ThreadException(coro, sys.exc_info())
@ -379,7 +366,7 @@ def run(root_coro):
exit_te = te exit_te = te
break break
except: except BaseException:
# For instance, KeyboardInterrupt during select(). Raise # For instance, KeyboardInterrupt during select(). Raise
# into root thread and terminate others. # into root thread and terminate others.
threads = {root_coro: ExceptionEvent(sys.exc_info())} threads = {root_coro: ExceptionEvent(sys.exc_info())}

View file

@ -1,5 +1,5 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
# This file is part of Confit. # This file is part of Confuse.
# Copyright 2016, Adrian Sampson. # Copyright 2016, Adrian Sampson.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
@ -24,10 +24,7 @@ import sys
import yaml import yaml
import collections import collections
import re import re
try:
from collections import OrderedDict from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
UNIX_DIR_VAR = 'XDG_CONFIG_HOME' UNIX_DIR_VAR = 'XDG_CONFIG_HOME'
UNIX_DIR_FALLBACK = '~/.config' UNIX_DIR_FALLBACK = '~/.config'
@ -47,9 +44,9 @@ REDACTED_TOMBSTONE = 'REDACTED'
# Utilities. # Utilities.
PY3 = sys.version_info[0] == 3 PY3 = sys.version_info[0] == 3
STRING = str if PY3 else unicode STRING = str if PY3 else unicode # noqa: F821
BASESTRING = str if PY3 else basestring BASESTRING = str if PY3 else basestring # noqa: F821
NUMERIC_TYPES = (int, float) if PY3 else (int, float, long) NUMERIC_TYPES = (int, float) if PY3 else (int, float, long) # noqa: F821
def iter_first(sequence): def iter_first(sequence):
@ -248,10 +245,15 @@ class ConfigView(object):
def set_args(self, namespace): def set_args(self, namespace):
"""Overlay parsed command-line arguments, generated by a library """Overlay parsed command-line arguments, generated by a library
like argparse or optparse, onto this view's value. like argparse or optparse, onto this view's value. ``namespace``
can be a ``dict`` or namespace object.
""" """
args = {} args = {}
for key, value in namespace.__dict__.items(): if isinstance(namespace, dict):
items = namespace.items()
else:
items = namespace.__dict__.items()
for key, value in items:
if value is not None: # Avoid unset options. if value is not None: # Avoid unset options.
args[key] = value args[key] = value
self.set(args) self.set(args)
@ -386,19 +388,42 @@ class ConfigView(object):
""" """
return as_template(template).value(self, template) return as_template(template).value(self, template)
# Old validation methods (deprecated). # Shortcuts for common templates.
def as_filename(self): def as_filename(self):
"""Get the value as a path. Equivalent to `get(Filename())`.
"""
return self.get(Filename()) return self.get(Filename())
def as_choice(self, choices): def as_choice(self, choices):
"""Get the value from a list of choices. Equivalent to
`get(Choice(choices))`.
"""
return self.get(Choice(choices)) return self.get(Choice(choices))
def as_number(self): def as_number(self):
"""Get the value as any number type: int or float. Equivalent to
`get(Number())`.
"""
return self.get(Number()) return self.get(Number())
def as_str_seq(self): def as_str_seq(self, split=True):
return self.get(StrSeq()) """Get the value as a sequence of strings. Equivalent to
`get(StrSeq())`.
"""
return self.get(StrSeq(split=split))
def as_pairs(self, default_value=None):
"""Get the value as a sequence of pairs of two strings. Equivalent to
`get(Pairs())`.
"""
return self.get(Pairs(default_value=default_value))
def as_str(self):
"""Get the value as a (Unicode) string. Equivalent to
`get(unicode)` on Python 2 and `get(str)` on Python 3.
"""
return self.get(String())
# Redaction. # Redaction.
@ -484,10 +509,9 @@ class Subview(ConfigView):
self.name += '.' self.name += '.'
if isinstance(self.key, int): if isinstance(self.key, int):
self.name += u'#{0}'.format(self.key) self.name += u'#{0}'.format(self.key)
elif isinstance(self.key, BASESTRING): elif isinstance(self.key, bytes):
if isinstance(self.key, bytes): self.name += self.key.decode('utf-8')
self.name += self.key.decode('utf8') elif isinstance(self.key, STRING):
else:
self.name += self.key self.name += self.key
else: else:
self.name += repr(self.key) self.name += repr(self.key)
@ -650,7 +674,7 @@ def load_yaml(filename):
parsed, a ConfigReadError is raised. parsed, a ConfigReadError is raised.
""" """
try: try:
with open(filename, 'r') as f: with open(filename, 'rb') as f:
return yaml.load(f, Loader=Loader) return yaml.load(f, Loader=Loader)
except (IOError, yaml.error.YAMLError) as exc: except (IOError, yaml.error.YAMLError) as exc:
raise ConfigReadError(filename, exc) raise ConfigReadError(filename, exc)
@ -890,9 +914,10 @@ class Configuration(RootView):
default_source = source default_source = source
break break
if default_source and default_source.filename: if default_source and default_source.filename:
with open(default_source.filename, 'r') as fp: with open(default_source.filename, 'rb') as fp:
default_data = fp.read() default_data = fp.read()
yaml_out = restore_yaml_comments(yaml_out, default_data) yaml_out = restore_yaml_comments(yaml_out,
default_data.decode('utf8'))
return yaml_out return yaml_out
@ -953,7 +978,7 @@ should be raised when the value is missing.
class Template(object): class Template(object):
"""A value template for configuration fields. """A value template for configuration fields.
The template works like a type and instructs Confit about how to The template works like a type and instructs Confuse about how to
interpret a deserialized YAML value. This includes type conversions, interpret a deserialized YAML value. This includes type conversions,
providing a default value, and validating for errors. For example, a providing a default value, and validating for errors. For example, a
filepath type might expand tildes and check that the file exists. filepath type might expand tildes and check that the file exists.
@ -1223,30 +1248,77 @@ class StrSeq(Template):
super(StrSeq, self).__init__() super(StrSeq, self).__init__()
self.split = split self.split = split
def _convert_value(self, x, view):
if isinstance(x, STRING):
return x
elif isinstance(x, bytes):
return x.decode('utf-8', 'ignore')
else:
self.fail(u'must be a list of strings', view, True)
def convert(self, value, view): def convert(self, value, view):
if isinstance(value, bytes): if isinstance(value, bytes):
value = value.decode('utf8', 'ignore') value = value.decode('utf-8', 'ignore')
if isinstance(value, STRING): if isinstance(value, STRING):
if self.split: if self.split:
return value.split() value = value.split()
else:
value = [value]
else: else:
return [value]
try: try:
value = list(value) value = list(value)
except TypeError: except TypeError:
self.fail(u'must be a whitespace-separated string or a list', self.fail(u'must be a whitespace-separated string or a list',
view, True) view, True)
def convert(x): return [self._convert_value(v, view) for v in value]
if isinstance(x, STRING):
return x
elif isinstance(x, bytes): class Pairs(StrSeq):
return x.decode('utf8', 'ignore') """A template for ordered key-value pairs.
This can either be given with the same syntax as for `StrSeq` (i.e. without
values), or as a list of strings and/or single-element mappings such as::
- key: value
- [key, value]
- key
The result is a list of two-element tuples. If no value is provided, the
`default_value` will be returned as the second element.
"""
def __init__(self, default_value=None):
"""Create a new template.
`default` is the dictionary value returned for items that are not
a mapping, but a single string.
"""
super(Pairs, self).__init__(split=True)
self.default_value = default_value
def _convert_value(self, x, view):
try:
return (super(Pairs, self)._convert_value(x, view),
self.default_value)
except ConfigTypeError:
if isinstance(x, collections.Mapping):
if len(x) != 1:
self.fail(u'must be a single-element mapping', view, True)
k, v = iter_first(x.items())
elif isinstance(x, collections.Sequence):
if len(x) != 2:
self.fail(u'must be a two-element list', view, True)
k, v = x
else: else:
self.fail(u'must be a list of strings', view, True) # Is this even possible? -> Likely, if some !directive cause
return list(map(convert, value)) # YAML to parse this to some custom type.
self.fail(u'must be a single string, mapping, or a list'
u'' + str(x),
view, True)
return (super(Pairs, self)._convert_value(k, view),
super(Pairs, self)._convert_value(v, view))
class Filename(Template): class Filename(Template):

View file

@ -33,8 +33,8 @@ import re
import ast import ast
import dis import dis
import types import types
import sys
from .confit import NUMERIC_TYPES import six
SYMBOL_DELIM = u'$' SYMBOL_DELIM = u'$'
FUNC_DELIM = u'%' FUNC_DELIM = u'%'
@ -74,11 +74,11 @@ def ex_literal(val):
""" """
if val is None: if val is None:
return ast.Name('None', ast.Load()) return ast.Name('None', ast.Load())
elif isinstance(val, NUMERIC_TYPES): elif isinstance(val, six.integer_types):
return ast.Num(val) return ast.Num(val)
elif isinstance(val, bool): elif isinstance(val, bool):
return ast.Name(bytes(val), ast.Load()) return ast.Name(bytes(val), ast.Load())
elif isinstance(val, basestring): elif isinstance(val, six.string_types):
return ast.Str(val) return ast.Str(val)
raise TypeError(u'no literal for {0}'.format(type(val))) raise TypeError(u'no literal for {0}'.format(type(val)))
@ -97,7 +97,7 @@ def ex_call(func, args):
function may be an expression or the name of a function. Each function may be an expression or the name of a function. Each
argument may be an expression or a value to be used as a literal. argument may be an expression or a value to be used as a literal.
""" """
if isinstance(func, basestring): if isinstance(func, six.string_types):
func = ex_rvalue(func) func = ex_rvalue(func)
args = list(args) args = list(args)
@ -105,7 +105,10 @@ def ex_call(func, args):
if not isinstance(args[i], ast.expr): if not isinstance(args[i], ast.expr):
args[i] = ex_literal(args[i]) args[i] = ex_literal(args[i])
if sys.version_info[:2] < (3, 5):
return ast.Call(func, args, [], None, None) return ast.Call(func, args, [], None, None)
else:
return ast.Call(func, args, [])
def compile_func(arg_names, statements, name='_the_func', debug=False): def compile_func(arg_names, statements, name='_the_func', debug=False):
@ -113,16 +116,31 @@ def compile_func(arg_names, statements, name='_the_func', debug=False):
the resulting Python function. If `debug`, then print out the the resulting Python function. If `debug`, then print out the
bytecode of the compiled function. bytecode of the compiled function.
""" """
if six.PY2:
func_def = ast.FunctionDef( func_def = ast.FunctionDef(
name.encode('utf8'), name=name.encode('utf-8'),
ast.arguments( args=ast.arguments(
[ast.Name(n, ast.Param()) for n in arg_names], args=[ast.Name(n, ast.Param()) for n in arg_names],
None, None, vararg=None,
[ex_literal(None) for _ in arg_names], kwarg=None,
defaults=[ex_literal(None) for _ in arg_names],
), ),
statements, body=statements,
[], decorator_list=[],
) )
else:
func_def = ast.FunctionDef(
name=name,
args=ast.arguments(
args=[ast.arg(arg=n, annotation=None) for n in arg_names],
kwonlyargs=[],
kw_defaults=[],
defaults=[ex_literal(None) for _ in arg_names],
),
body=statements,
decorator_list=[],
)
mod = ast.Module([func_def]) mod = ast.Module([func_def])
ast.fix_missing_locations(mod) ast.fix_missing_locations(mod)
@ -164,8 +182,12 @@ class Symbol(object):
def translate(self): def translate(self):
"""Compile the variable lookup.""" """Compile the variable lookup."""
expr = ex_rvalue(VARIABLE_PREFIX + self.ident.encode('utf8')) if six.PY2:
return [expr], set([self.ident.encode('utf8')]), set() ident = self.ident.encode('utf-8')
else:
ident = self.ident
expr = ex_rvalue(VARIABLE_PREFIX + ident)
return [expr], set([ident]), set()
class Call(object): class Call(object):
@ -190,15 +212,19 @@ class Call(object):
except Exception as exc: except Exception as exc:
# Function raised exception! Maybe inlining the name of # Function raised exception! Maybe inlining the name of
# the exception will help debug. # the exception will help debug.
return u'<%s>' % unicode(exc) return u'<%s>' % six.text_type(exc)
return unicode(out) return six.text_type(out)
else: else:
return self.original return self.original
def translate(self): def translate(self):
"""Compile the function call.""" """Compile the function call."""
varnames = set() varnames = set()
funcnames = set([self.ident.encode('utf8')]) if six.PY2:
ident = self.ident.encode('utf-8')
else:
ident = self.ident
funcnames = set([ident])
arg_exprs = [] arg_exprs = []
for arg in self.args: for arg in self.args:
@ -213,14 +239,14 @@ class Call(object):
[ex_call( [ex_call(
'map', 'map',
[ [
ex_rvalue('unicode'), ex_rvalue(six.text_type.__name__),
ast.List(subexprs, ast.Load()), ast.List(subexprs, ast.Load()),
] ]
)], )],
)) ))
subexpr_call = ex_call( subexpr_call = ex_call(
FUNCTION_PREFIX + self.ident.encode('utf8'), FUNCTION_PREFIX + ident,
arg_exprs arg_exprs
) )
return [subexpr_call], varnames, funcnames return [subexpr_call], varnames, funcnames
@ -242,11 +268,11 @@ class Expression(object):
""" """
out = [] out = []
for part in self.parts: for part in self.parts:
if isinstance(part, basestring): if isinstance(part, six.string_types):
out.append(part) out.append(part)
else: else:
out.append(part.evaluate(env)) out.append(part.evaluate(env))
return u''.join(map(unicode, out)) return u''.join(map(six.text_type, out))
def translate(self): def translate(self):
"""Compile the expression to a list of Python AST expressions, a """Compile the expression to a list of Python AST expressions, a
@ -256,7 +282,7 @@ class Expression(object):
varnames = set() varnames = set()
funcnames = set() funcnames = set()
for part in self.parts: for part in self.parts:
if isinstance(part, basestring): if isinstance(part, six.string_types):
expressions.append(ex_literal(part)) expressions.append(ex_literal(part))
else: else:
e, v, f = part.translate() e, v, f = part.translate()
@ -285,16 +311,24 @@ class Parser(object):
replaced with a real, accepted parsing technique (PEG, parser replaced with a real, accepted parsing technique (PEG, parser
generator, etc.). generator, etc.).
""" """
def __init__(self, string): def __init__(self, string, in_argument=False):
""" Create a new parser.
:param in_arguments: boolean that indicates the parser is to be
used for parsing function arguments, ie. considering commas
(`ARG_SEP`) a special character
"""
self.string = string self.string = string
self.in_argument = in_argument
self.pos = 0 self.pos = 0
self.parts = [] self.parts = []
# Common parsing resources. # Common parsing resources.
special_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_OPEN, GROUP_CLOSE, special_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_OPEN, GROUP_CLOSE,
ARG_SEP, ESCAPE_CHAR) ESCAPE_CHAR)
special_char_re = re.compile(r'[%s]|$' % special_char_re = re.compile(r'[%s]|\Z' %
u''.join(re.escape(c) for c in special_chars)) u''.join(re.escape(c) for c in special_chars))
escapable_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP)
terminator_chars = (GROUP_CLOSE,)
def parse_expression(self): def parse_expression(self):
"""Parse a template expression starting at ``pos``. Resulting """Parse a template expression starting at ``pos``. Resulting
@ -302,16 +336,29 @@ class Parser(object):
the ``parts`` field, a list. The ``pos`` field is updated to be the ``parts`` field, a list. The ``pos`` field is updated to be
the next character after the expression. the next character after the expression.
""" """
# Append comma (ARG_SEP) to the list of special characters only when
# parsing function arguments.
extra_special_chars = ()
special_char_re = self.special_char_re
if self.in_argument:
extra_special_chars = (ARG_SEP,)
special_char_re = re.compile(
r'[%s]|\Z' % u''.join(
re.escape(c) for c in
self.special_chars + extra_special_chars
)
)
text_parts = [] text_parts = []
while self.pos < len(self.string): while self.pos < len(self.string):
char = self.string[self.pos] char = self.string[self.pos]
if char not in self.special_chars: if char not in self.special_chars + extra_special_chars:
# A non-special character. Skip to the next special # A non-special character. Skip to the next special
# character, treating the interstice as literal text. # character, treating the interstice as literal text.
next_pos = ( next_pos = (
self.special_char_re.search( special_char_re.search(
self.string[self.pos:]).start() + self.pos self.string[self.pos:]).start() + self.pos
) )
text_parts.append(self.string[self.pos:next_pos]) text_parts.append(self.string[self.pos:next_pos])
@ -322,14 +369,14 @@ class Parser(object):
# The last character can never begin a structure, so we # The last character can never begin a structure, so we
# just interpret it as a literal character (unless it # just interpret it as a literal character (unless it
# terminates the expression, as with , and }). # terminates the expression, as with , and }).
if char not in (GROUP_CLOSE, ARG_SEP): if char not in self.terminator_chars + extra_special_chars:
text_parts.append(char) text_parts.append(char)
self.pos += 1 self.pos += 1
break break
next_char = self.string[self.pos + 1] next_char = self.string[self.pos + 1]
if char == ESCAPE_CHAR and next_char in \ if char == ESCAPE_CHAR and next_char in (self.escapable_chars +
(SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP): extra_special_chars):
# An escaped special character ($$, $}, etc.). Note that # An escaped special character ($$, $}, etc.). Note that
# ${ is not an escape sequence: this is ambiguous with # ${ is not an escape sequence: this is ambiguous with
# the start of a symbol and it's not necessary (just # the start of a symbol and it's not necessary (just
@ -349,7 +396,7 @@ class Parser(object):
elif char == FUNC_DELIM: elif char == FUNC_DELIM:
# Parse a function call. # Parse a function call.
self.parse_call() self.parse_call()
elif char in (GROUP_CLOSE, ARG_SEP): elif char in self.terminator_chars + extra_special_chars:
# Template terminated. # Template terminated.
break break
elif char == GROUP_OPEN: elif char == GROUP_OPEN:
@ -457,7 +504,7 @@ class Parser(object):
expressions = [] expressions = []
while self.pos < len(self.string): while self.pos < len(self.string):
subparser = Parser(self.string[self.pos:]) subparser = Parser(self.string[self.pos:], in_argument=True)
subparser.parse_expression() subparser.parse_expression()
# Extract and advance past the parsed expression. # Extract and advance past the parsed expression.
@ -526,8 +573,9 @@ class Template(object):
""" """
try: try:
res = self.compiled(values, functions) res = self.compiled(values, functions)
except: # Handle any exceptions thrown by compiled version. except Exception: # Handle any exceptions thrown by compiled version.
res = self.interpret(values, functions) res = self.interpret(values, functions)
return res return res
def translate(self): def translate(self):
@ -563,7 +611,7 @@ if __name__ == '__main__':
import timeit import timeit
_tmpl = Template(u'foo $bar %baz{foozle $bar barzle} $bar') _tmpl = Template(u'foo $bar %baz{foozle $bar barzle} $bar')
_vars = {'bar': 'qux'} _vars = {'bar': 'qux'}
_funcs = {'baz': unicode.upper} _funcs = {'baz': six.text_type.upper}
interp_time = timeit.timeit('_tmpl.interpret(_vars, _funcs)', interp_time = timeit.timeit('_tmpl.interpret(_vars, _funcs)',
'from __main__ import _tmpl, _vars, _funcs', 'from __main__ import _tmpl, _vars, _funcs',
number=10000) number=10000)

View file

@ -20,6 +20,7 @@ import os
import stat import stat
import ctypes import ctypes
import sys import sys
import beets.util
def _is_hidden_osx(path): def _is_hidden_osx(path):
@ -27,7 +28,7 @@ def _is_hidden_osx(path):
This uses os.lstat to work out if a file has the "hidden" flag. This uses os.lstat to work out if a file has the "hidden" flag.
""" """
file_stat = os.lstat(path) file_stat = os.lstat(beets.util.syspath(path))
if hasattr(file_stat, 'st_flags') and hasattr(stat, 'UF_HIDDEN'): if hasattr(file_stat, 'st_flags') and hasattr(stat, 'UF_HIDDEN'):
return bool(file_stat.st_flags & stat.UF_HIDDEN) return bool(file_stat.st_flags & stat.UF_HIDDEN)
@ -45,7 +46,7 @@ def _is_hidden_win(path):
hidden_mask = 2 hidden_mask = 2
# Retrieve the attributes for the file. # Retrieve the attributes for the file.
attrs = ctypes.windll.kernel32.GetFileAttributesW(path) attrs = ctypes.windll.kernel32.GetFileAttributesW(beets.util.syspath(path))
# Ensure we have valid attribues and compare them against the mask. # Ensure we have valid attribues and compare them against the mask.
return attrs >= 0 and attrs & hidden_mask return attrs >= 0 and attrs & hidden_mask
@ -56,11 +57,12 @@ def _is_hidden_dot(path):
Files starting with a dot are seen as "hidden" files on Unix-based OSes. Files starting with a dot are seen as "hidden" files on Unix-based OSes.
""" """
return os.path.basename(path).startswith('.') return os.path.basename(path).startswith(b'.')
def is_hidden(path): def is_hidden(path):
"""Return whether or not a file is hidden. """Return whether or not a file is hidden. `path` should be a
bytestring filename.
This method works differently depending on the platform it is called on. This method works differently depending on the platform it is called on.
@ -73,10 +75,6 @@ def is_hidden(path):
On any other operating systems (i.e. Linux), it uses `is_hidden_dot` to On any other operating systems (i.e. Linux), it uses `is_hidden_dot` to
work out if a file is hidden. work out if a file is hidden.
""" """
# Convert the path to unicode if it is not already.
if not isinstance(path, unicode):
path = path.decode('utf-8')
# Run platform specific functions depending on the platform # Run platform specific functions depending on the platform
if sys.platform == 'darwin': if sys.platform == 'darwin':
return _is_hidden_osx(path) or _is_hidden_dot(path) return _is_hidden_osx(path) or _is_hidden_dot(path)

View file

@ -34,9 +34,10 @@ in place of any single coroutine.
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
import Queue from six.moves import queue
from threading import Thread, Lock from threading import Thread, Lock
import sys import sys
import six
BUBBLE = '__PIPELINE_BUBBLE__' BUBBLE = '__PIPELINE_BUBBLE__'
POISON = '__PIPELINE_POISON__' POISON = '__PIPELINE_POISON__'
@ -63,7 +64,17 @@ def _invalidate_queue(q, val=None, sync=True):
q.mutex.acquire() q.mutex.acquire()
try: try:
q.maxsize = 0 # Originally, we set `maxsize` to 0 here, which is supposed to mean
# an unlimited queue size. However, there is a race condition since
# Python 3.2 when this attribute is changed while another thread is
# waiting in put()/get() due to a full/empty queue.
# Setting it to 2 is still hacky because Python does not give any
# guarantee what happens if Queue methods/attributes are overwritten
# when it is already in use. However, because of our dummy _put()
# and _get() methods, it provides a workaround to let the queue appear
# to be never empty or full.
# See issue https://github.com/beetbox/beets/issues/2078
q.maxsize = 2
q._qsize = _qsize q._qsize = _qsize
q._put = _put q._put = _put
q._get = _get q._get = _get
@ -75,13 +86,13 @@ def _invalidate_queue(q, val=None, sync=True):
q.mutex.release() q.mutex.release()
class CountedQueue(Queue.Queue): class CountedQueue(queue.Queue):
"""A queue that keeps track of the number of threads that are """A queue that keeps track of the number of threads that are
still feeding into it. The queue is poisoned when all threads are still feeding into it. The queue is poisoned when all threads are
finished with the queue. finished with the queue.
""" """
def __init__(self, maxsize=0): def __init__(self, maxsize=0):
Queue.Queue.__init__(self, maxsize) queue.Queue.__init__(self, maxsize)
self.nthreads = 0 self.nthreads = 0
self.poisoned = False self.poisoned = False
@ -259,7 +270,7 @@ class FirstPipelineThread(PipelineThread):
return return
self.out_queue.put(msg) self.out_queue.put(msg)
except: except BaseException:
self.abort_all(sys.exc_info()) self.abort_all(sys.exc_info())
return return
@ -307,7 +318,7 @@ class MiddlePipelineThread(PipelineThread):
return return
self.out_queue.put(msg) self.out_queue.put(msg)
except: except BaseException:
self.abort_all(sys.exc_info()) self.abort_all(sys.exc_info())
return return
@ -346,7 +357,7 @@ class LastPipelineThread(PipelineThread):
# Send to consumer. # Send to consumer.
self.coro.send(msg) self.coro.send(msg)
except: except BaseException:
self.abort_all(sys.exc_info()) self.abort_all(sys.exc_info())
return return
@ -411,10 +422,10 @@ class Pipeline(object):
try: try:
# Using a timeout allows us to receive KeyboardInterrupt # Using a timeout allows us to receive KeyboardInterrupt
# exceptions during the join(). # exceptions during the join().
while threads[-1].isAlive(): while threads[-1].is_alive():
threads[-1].join(1) threads[-1].join(1)
except: except BaseException:
# Stop all the threads immediately. # Stop all the threads immediately.
for thread in threads: for thread in threads:
thread.abort() thread.abort()
@ -431,7 +442,7 @@ class Pipeline(object):
exc_info = thread.exc_info exc_info = thread.exc_info
if exc_info: if exc_info:
# Make the exception appear as it was raised originally. # Make the exception appear as it was raised originally.
raise exc_info[0], exc_info[1], exc_info[2] six.reraise(exc_info[0], exc_info[1], exc_info[2])
def pull(self): def pull(self):
"""Yield elements from the end of the pipeline. Runs the stages """Yield elements from the end of the pipeline. Runs the stages

167
libs/beetsplug/absubmit.py Normal file
View file

@ -0,0 +1,167 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Pieter Mulder.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Calculate acoustic information and submit to AcousticBrainz.
"""
from __future__ import division, absolute_import, print_function
import errno
import hashlib
import json
import os
import subprocess
import tempfile
from distutils.spawn import find_executable
import requests
from beets import plugins
from beets import util
from beets import ui
class ABSubmitError(Exception):
"""Raised when failing to analyse file with extractor."""
def call(args):
"""Execute the command and return its output.
Raise a AnalysisABSubmitError on failure.
"""
try:
return util.command_output(args)
except subprocess.CalledProcessError as e:
raise ABSubmitError(
u'{0} exited with status {1}'.format(args[0], e.returncode)
)
class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin):
    """Analyse library items with the AcousticBrainz extractor and submit
    the resulting JSON documents to the AcousticBrainz server, keyed by
    each item's MusicBrainz track ID.
    """

    def __init__(self):
        super(AcousticBrainzSubmitPlugin, self).__init__()
        self.config.add({'extractor': u''})

        self.extractor = self.config['extractor'].as_str()
        if self.extractor:
            self.extractor = util.normpath(self.extractor)
            # Explicit path to extractor given in the config: it must
            # point at an existing file.
            if not os.path.isfile(self.extractor):
                raise ui.UserError(
                    u'Extractor command does not exist: {0}.'.
                    format(self.extractor)
                )
        else:
            # Implicit path to extractor, search for it in path.
            self.extractor = 'streaming_extractor_music'
            try:
                call([self.extractor])
            except OSError:
                raise ui.UserError(
                    u'No extractor command found: please install the '
                    u'extractor binary from http://acousticbrainz.org/download'
                )
            except ABSubmitError:
                # Extractor found, will exit with an error if not called with
                # the correct amount of arguments.
                pass

            # Get the executable location on the system, which we need
            # to calculate the SHA-1 hash.
            self.extractor = find_executable(self.extractor)

        # Calculate extractor hash; it identifies the extractor build in
        # the submitted analysis data.
        self.extractor_sha = hashlib.sha1()
        with open(self.extractor, 'rb') as extractor:
            self.extractor_sha.update(extractor.read())
        self.extractor_sha = self.extractor_sha.hexdigest()

    # API endpoint template; ``{mbid}`` is filled with the recording's
    # MusicBrainz track ID when submitting.
    base_url = 'https://acousticbrainz.org/api/v1/{mbid}/low-level'

    def commands(self):
        """Expose the ``absubmit`` CLI subcommand."""
        cmd = ui.Subcommand(
            'absubmit',
            help=u'calculate and submit AcousticBrainz analysis'
        )
        cmd.func = self.command
        return [cmd]

    def command(self, lib, opts, args):
        """Analyse and submit every library item matching the query args."""
        # Get items from arguments
        items = lib.items(ui.decargs(args))
        for item in items:
            analysis = self._get_analysis(item)
            if analysis:
                self._submit_data(item, analysis)

    def _get_analysis(self, item):
        """Run the extractor on ``item`` and return the parsed JSON
        analysis, or ``None`` when the item lacks a MusicBrainz track ID
        or the extractor fails.
        """
        mbid = item['mb_trackid']
        # If file has no mbid skip it.
        if not mbid:
            self._log.info(u'Not analysing {}, missing '
                           u'musicbrainz track id.', item)
            return None

        # Temporary file to save extractor output to, extractor only works
        # if an output file is given. Here we use a temporary file to copy
        # the data into a python object and then remove the file from the
        # system.
        tmp_file, filename = tempfile.mkstemp(suffix='.json')
        try:
            # Close the file, so the extractor can overwrite it.
            os.close(tmp_file)
            try:
                call([self.extractor, util.syspath(item.path), filename])
            except ABSubmitError as e:
                self._log.warning(
                    u'Failed to analyse {item} for AcousticBrainz: {error}',
                    item=item, error=e
                )
                return None
            with open(filename, 'rb') as tmp_file:
                analysis = json.load(tmp_file)
            # Add the hash to the output.
            analysis['metadata']['version']['essentia_build_sha'] = \
                self.extractor_sha
            return analysis
        finally:
            # Always clean up the temporary file, even on failure.
            try:
                os.remove(filename)
            except OSError as e:
                # ENOENT means file does not exist, just ignore this error.
                if e.errno != errno.ENOENT:
                    raise

    def _submit_data(self, item, data):
        """POST the analysis ``data`` for ``item`` to the AcousticBrainz
        API and log the outcome. Failures are logged, not raised.
        """
        mbid = item['mb_trackid']
        headers = {'Content-Type': 'application/json'}
        response = requests.post(self.base_url.format(mbid=mbid),
                                 json=data, headers=headers)
        # Test that the request was successful and log an error on failure.
        if response.status_code != 200:
            try:
                message = response.json()['message']
            except (ValueError, KeyError) as e:
                message = u'unable to get error message: {}'.format(e)
            self._log.error(
                u'Failed to submit AcousticBrainz analysis of {item}: '
                u'{message}).', item=item, message=message
            )
        else:
            self._log.debug(u'Successfully submitted AcousticBrainz analysis '
                            u'for {}.', item)

View file

@ -18,20 +18,101 @@
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
import requests import requests
import operator
from collections import defaultdict
from beets import plugins, ui from beets import plugins, ui
from functools import reduce
ACOUSTIC_BASE = "https://acousticbrainz.org/" ACOUSTIC_BASE = "https://acousticbrainz.org/"
LEVELS = ["/low-level", "/high-level"] LEVELS = ["/low-level", "/high-level"]
ABSCHEME = {
'highlevel': {
'danceability': {
'all': {
'danceable': 'danceable'
}
},
'gender': {
'value': 'gender'
},
'genre_rosamerica': {
'value': 'genre_rosamerica'
},
'mood_acoustic': {
'all': {
'acoustic': 'mood_acoustic'
}
},
'mood_aggressive': {
'all': {
'aggressive': 'mood_aggressive'
}
},
'mood_electronic': {
'all': {
'electronic': 'mood_electronic'
}
},
'mood_happy': {
'all': {
'happy': 'mood_happy'
}
},
'mood_party': {
'all': {
'party': 'mood_party'
}
},
'mood_relaxed': {
'all': {
'relaxed': 'mood_relaxed'
}
},
'mood_sad': {
'all': {
'sad': 'mood_sad'
}
},
'ismir04_rhythm': {
'value': 'rhythm'
},
'tonal_atonal': {
'all': {
'tonal': 'tonal'
}
},
'voice_instrumental': {
'value': 'voice_instrumental'
},
},
'lowlevel': {
'average_loudness': 'average_loudness'
},
'rhythm': {
'bpm': 'bpm'
},
'tonal': {
'chords_changes_rate': 'chords_changes_rate',
'chords_key': 'chords_key',
'chords_number_rate': 'chords_number_rate',
'chords_scale': 'chords_scale',
'key_key': ('initial_key', 0),
'key_scale': ('initial_key', 1),
'key_strength': 'key_strength'
}
}
class AcousticPlugin(plugins.BeetsPlugin): class AcousticPlugin(plugins.BeetsPlugin):
def __init__(self): def __init__(self):
super(AcousticPlugin, self).__init__() super(AcousticPlugin, self).__init__()
self.config.add({'auto': True}) self.config.add({
'auto': True,
'force': False,
'tags': []
})
if self.config['auto']: if self.config['auto']:
self.register_listener('import_task_files', self.register_listener('import_task_files',
self.import_task_files) self.import_task_files)
@ -39,10 +120,16 @@ class AcousticPlugin(plugins.BeetsPlugin):
def commands(self): def commands(self):
cmd = ui.Subcommand('acousticbrainz', cmd = ui.Subcommand('acousticbrainz',
help=u"fetch metadata from AcousticBrainz") help=u"fetch metadata from AcousticBrainz")
cmd.parser.add_option(
u'-f', u'--force', dest='force_refetch',
action='store_true', default=False,
help=u're-download data when already present'
)
def func(lib, opts, args): def func(lib, opts, args):
items = lib.items(ui.decargs(args)) items = lib.items(ui.decargs(args))
fetch_info(self._log, items, ui.should_write()) self._fetch_info(items, ui.should_write(),
opts.force_refetch or self.config['force'])
cmd.func = func cmd.func = func
return [cmd] return [cmd]
@ -50,116 +137,169 @@ class AcousticPlugin(plugins.BeetsPlugin):
def import_task_files(self, session, task): def import_task_files(self, session, task):
"""Function is called upon beet import. """Function is called upon beet import.
""" """
self._fetch_info(task.imported_items(), False, True)
items = task.imported_items() def _get_data(self, mbid):
fetch_info(self._log, items, False) data = {}
for url in _generate_urls(mbid):
self._log.debug(u'fetching URL: {}', url)
def fetch_info(log, items, write):
"""Get data from AcousticBrainz for the items.
"""
def get_value(*map_path):
try: try:
return reduce(operator.getitem, map_path, data) res = requests.get(url)
except KeyError:
log.debug(u'Invalid Path: {}', map_path)
for item in items:
if item.mb_trackid:
log.info(u'getting data for: {}', item)
# Fetch the data from the AB API.
urls = [generate_url(item.mb_trackid, path) for path in LEVELS]
log.debug(u'fetching URLs: {}', ' '.join(urls))
try:
res = [requests.get(url) for url in urls]
except requests.RequestException as exc: except requests.RequestException as exc:
log.info(u'request error: {}', exc) self._log.info(u'request error: {}', exc)
continue return {}
# Check for missing tracks. if res.status_code == 404:
if any(r.status_code == 404 for r in res): self._log.info(u'recording ID {} not found', mbid)
log.info(u'recording ID {} not found', item.mb_trackid) return {}
continue
# Parse the JSON response.
try: try:
data = res[0].json() data.update(res.json())
data.update(res[1].json())
except ValueError: except ValueError:
log.debug(u'Invalid Response: {} & {}', [r.text for r in res]) self._log.debug(u'Invalid Response: {}', res.text)
return {}
# Get each field and assign it on the item. return data
item.danceable = get_value(
"highlevel", "danceability", "all", "danceable",
)
item.gender = get_value(
"highlevel", "gender", "value",
)
item.genre_rosamerica = get_value(
"highlevel", "genre_rosamerica", "value"
)
item.mood_acoustic = get_value(
"highlevel", "mood_acoustic", "all", "acoustic"
)
item.mood_aggressive = get_value(
"highlevel", "mood_aggressive", "all", "aggressive"
)
item.mood_electronic = get_value(
"highlevel", "mood_electronic", "all", "electronic"
)
item.mood_happy = get_value(
"highlevel", "mood_happy", "all", "happy"
)
item.mood_party = get_value(
"highlevel", "mood_party", "all", "party"
)
item.mood_relaxed = get_value(
"highlevel", "mood_relaxed", "all", "relaxed"
)
item.mood_sad = get_value(
"highlevel", "mood_sad", "all", "sad"
)
item.rhythm = get_value(
"highlevel", "ismir04_rhythm", "value"
)
item.tonal = get_value(
"highlevel", "tonal_atonal", "all", "tonal"
)
item.voice_instrumental = get_value(
"highlevel", "voice_instrumental", "value"
)
item.average_loudness = get_value(
"lowlevel", "average_loudness"
)
item.chords_changes_rate = get_value(
"tonal", "chords_changes_rate"
)
item.chords_key = get_value(
"tonal", "chords_key"
)
item.chords_number_rate = get_value(
"tonal", "chords_number_rate"
)
item.chords_scale = get_value(
"tonal", "chords_scale"
)
item.initial_key = '{} {}'.format(
get_value("tonal", "key_key"),
get_value("tonal", "key_scale")
)
item.key_strength = get_value(
"tonal", "key_strength"
)
# Store the data. def _fetch_info(self, items, write, force):
"""Fetch additional information from AcousticBrainz for the `item`s.
"""
tags = self.config['tags'].as_str_seq()
for item in items:
# If we're not forcing re-downloading for all tracks, check
# whether the data is already present. We use one
# representative field name to check for previously fetched
# data.
if not force:
mood_str = item.get('mood_acoustic', u'')
if mood_str:
self._log.info(u'data already present for: {}', item)
continue
# We can only fetch data for tracks with MBIDs.
if not item.mb_trackid:
continue
self._log.info(u'getting data for: {}', item)
data = self._get_data(item.mb_trackid)
if data:
for attr, val in self._map_data_to_scheme(data, ABSCHEME):
if not tags or attr in tags:
self._log.debug(u'attribute {} of {} set to {}',
attr,
item,
val)
setattr(item, attr, val)
else:
self._log.debug(u'skipping attribute {} of {}'
u' (value {}) due to config',
attr,
item,
val)
item.store() item.store()
if write: if write:
item.try_write() item.try_write()
def _map_data_to_scheme(self, data, scheme):
"""Given `data` as a structure of nested dictionaries, and `scheme` as a
structure of nested dictionaries , `yield` tuples `(attr, val)` where
`attr` and `val` are corresponding leaf nodes in `scheme` and `data`.
def generate_url(mbid, level): As its name indicates, `scheme` defines how the data is structured,
"""Generates AcousticBrainz end point url for given MBID. so this function tries to find leaf nodes in `data` that correspond
to the leafs nodes of `scheme`, and not the other way around.
Leaf nodes of `data` that do not exist in the `scheme` do not matter.
If a leaf node of `scheme` is not present in `data`,
no value is yielded for that attribute and a simple warning is issued.
Finally, to account for attributes of which the value is split between
several leaf nodes in `data`, leaf nodes of `scheme` can be tuples
`(attr, order)` where `attr` is the attribute to which the leaf node
belongs, and `order` is the place at which it should appear in the
value. The different `value`s belonging to the same `attr` are simply
joined with `' '`. This is hardcoded and not very flexible, but it gets
the job done.
For example:
>>> scheme = {
'key1': 'attribute',
'key group': {
'subkey1': 'subattribute',
'subkey2': ('composite attribute', 0)
},
'key2': ('composite attribute', 1)
}
>>> data = {
'key1': 'value',
'key group': {
'subkey1': 'subvalue',
'subkey2': 'part 1 of composite attr'
},
'key2': 'part 2'
}
>>> print(list(_map_data_to_scheme(data, scheme)))
[('subattribute', 'subvalue'),
('attribute', 'value'),
('composite attribute', 'part 1 of composite attr part 2')]
""" """
return ACOUSTIC_BASE + mbid + level # First, we traverse `scheme` and `data`, `yield`ing all the non
# composites attributes straight away and populating the dictionary
# `composites` with the composite attributes.
# When we are finished traversing `scheme`, `composites` should
# map each composite attribute to an ordered list of the values
# belonging to the attribute, for example:
# `composites = {'initial_key': ['B', 'minor']}`.
# The recursive traversal.
composites = defaultdict(list)
for attr, val in self._data_to_scheme_child(data,
scheme,
composites):
yield attr, val
# When composites has been populated, yield the composite attributes
# by joining their parts.
for composite_attr, value_parts in composites.items():
yield composite_attr, ' '.join(value_parts)
def _data_to_scheme_child(self, subdata, subscheme, composites):
"""The recursive business logic of :meth:`_map_data_to_scheme`:
Traverse two structures of nested dictionaries in parallel and `yield`
tuples of corresponding leaf nodes.
If a leaf node belongs to a composite attribute (is a `tuple`),
populate `composites` rather than yielding straight away.
All the child functions for a single traversal share the same
`composites` instance, which is passed along.
"""
for k, v in subscheme.items():
if k in subdata:
if type(v) == dict:
for attr, val in self._data_to_scheme_child(subdata[k],
v,
composites):
yield attr, val
elif type(v) == tuple:
composite_attribute, part_number = v
attribute_parts = composites[composite_attribute]
# Parts are not guaranteed to be inserted in order
while len(attribute_parts) <= part_number:
attribute_parts.append('')
attribute_parts[part_number] = subdata[k]
else:
yield v, subdata[k]
else:
self._log.warning(u'Acousticbrainz did not provide info'
u'about {}', k)
self._log.debug(u'Data {} could not be mapped to scheme {} '
u'because key {} was not found', subdata, v, k)
def _generate_urls(mbid):
"""Generates AcousticBrainz end point urls for given `mbid`.
"""
for level in LEVELS:
yield ACOUSTIC_BASE + mbid + level

View file

@ -27,6 +27,24 @@ import shlex
import os import os
import errno import errno
import sys import sys
import six
class CheckerCommandException(Exception):
"""Raised when running a checker failed.
Attributes:
checker: Checker command name.
path: Path to the file being validated.
errno: Error number from the checker execution error.
msg: Message from the checker execution error.
"""
def __init__(self, cmd, oserror):
self.checker = cmd[0]
self.path = cmd[-1]
self.errno = oserror.errno
self.msg = str(oserror)
class BadFiles(BeetsPlugin): class BadFiles(BeetsPlugin):
@ -42,11 +60,7 @@ class BadFiles(BeetsPlugin):
errors = 1 errors = 1
status = e.returncode status = e.returncode
except OSError as e: except OSError as e:
if e.errno == errno.ENOENT: raise CheckerCommandException(cmd, e)
ui.print_(u"command not found: {}".format(cmd[0]))
sys.exit(1)
else:
raise
output = output.decode(sys.getfilesystemencoding()) output = output.decode(sys.getfilesystemencoding())
return status, errors, [line for line in output.split("\n") if line] return status, errors, [line for line in output.split("\n") if line]
@ -92,29 +106,47 @@ class BadFiles(BeetsPlugin):
ui.colorize('text_error', dpath))) ui.colorize('text_error', dpath)))
# Run the checker against the file if one is found # Run the checker against the file if one is found
ext = os.path.splitext(item.path)[1][1:] ext = os.path.splitext(item.path)[1][1:].decode('utf8', 'ignore')
checker = self.get_checker(ext) checker = self.get_checker(ext)
if not checker: if not checker:
self._log.error(u"no checker specified in the config for {}",
ext)
continue continue
path = item.path path = item.path
if not isinstance(path, unicode): if not isinstance(path, six.text_type):
path = item.path.decode(sys.getfilesystemencoding()) path = item.path.decode(sys.getfilesystemencoding())
try:
status, errors, output = checker(path) status, errors, output = checker(path)
except CheckerCommandException as e:
if e.errno == errno.ENOENT:
self._log.error(
u"command not found: {} when validating file: {}",
e.checker,
e.path
)
else:
self._log.error(u"error invoking {}: {}", e.checker, e.msg)
continue
if status > 0: if status > 0:
ui.print_(u"{}: checker exited withs status {}" ui.print_(u"{}: checker exited with status {}"
.format(ui.colorize('text_error', dpath), status)) .format(ui.colorize('text_error', dpath), status))
for line in output: for line in output:
ui.print_(" {}".format(displayable_path(line))) ui.print_(u" {}".format(displayable_path(line)))
elif errors > 0: elif errors > 0:
ui.print_(u"{}: checker found {} errors or warnings" ui.print_(u"{}: checker found {} errors or warnings"
.format(ui.colorize('text_warning', dpath), errors)) .format(ui.colorize('text_warning', dpath), errors))
for line in output: for line in output:
ui.print_(u" {}".format(displayable_path(line))) ui.print_(u" {}".format(displayable_path(line)))
else: elif opts.verbose:
ui.print_(u"{}: ok".format(ui.colorize('text_success', dpath))) ui.print_(u"{}: ok".format(ui.colorize('text_success', dpath)))
def commands(self): def commands(self):
bad_command = Subcommand('bad', bad_command = Subcommand('bad',
help=u'check for corrupt or missing files') help=u'check for corrupt or missing files')
bad_command.parser.add_option(
u'-v', u'--verbose',
action='store_true', default=False, dest='verbose',
help=u'view results for both the bad and uncorrupted files'
)
bad_command.func = self.check_bad bad_command.func = self.check_bad
return [bad_command] return [bad_command]

461
libs/beetsplug/beatport.py Normal file
View file

@ -0,0 +1,461 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Beatport release and track search support to the autotagger
"""
from __future__ import division, absolute_import, print_function
import json
import re
import six
from datetime import datetime, timedelta
from requests_oauthlib import OAuth1Session
from requests_oauthlib.oauth1_session import (TokenRequestDenied, TokenMissing,
VerifierMissing)
import beets
import beets.ui
from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance
from beets.plugins import BeetsPlugin
from beets.util import confit
AUTH_ERRORS = (TokenRequestDenied, TokenMissing, VerifierMissing)
USER_AGENT = u'beets/{0} +http://beets.io/'.format(beets.__version__)
class BeatportAPIError(Exception):
    """Raised when communication with the Beatport API fails."""
class BeatportObject(object):
    """Base class for Beatport catalogue entities.

    Copies the common fields out of a raw API ``data`` dictionary;
    optional fields are only set when present in the response.
    """

    def __init__(self, data):
        self.beatport_id = data['id']
        self.name = six.text_type(data['name'])
        if 'releaseDate' in data:
            self.release_date = datetime.strptime(data['releaseDate'],
                                                  '%Y-%m-%d')
        if 'artists' in data:
            self.artists = [(artist['id'], six.text_type(artist['name']))
                            for artist in data['artists']]
        if 'genres' in data:
            self.genres = [six.text_type(genre['name'])
                           for genre in data['genres']]
class BeatportClient(object):
    """Thin wrapper around the Beatport OAuth1 web API.

    Handles the OAuth handshake and exposes catalogue search/lookup
    helpers that return :py:class:`BeatportRelease` /
    :py:class:`BeatportTrack` objects. Communication errors are raised
    as :py:class:`BeatportAPIError`.
    """

    _api_base = 'https://oauth-api.beatport.com'

    def __init__(self, c_key, c_secret, auth_key=None, auth_secret=None):
        """ Initiate the client with OAuth information.

        For the initial authentication with the backend `auth_key` and
        `auth_secret` can be `None`. Use `get_authorize_url` and
        `get_access_token` to obtain them for subsequent uses of the API.

        :param c_key: OAuth1 client key
        :param c_secret: OAuth1 client secret
        :param auth_key: OAuth1 resource owner key
        :param auth_secret: OAuth1 resource owner secret
        """
        self.api = OAuth1Session(
            client_key=c_key, client_secret=c_secret,
            resource_owner_key=auth_key,
            resource_owner_secret=auth_secret,
            callback_uri='oob')
        self.api.headers = {'User-Agent': USER_AGENT}

    def get_authorize_url(self):
        """ Generate the URL for the user to authorize the application.

        Retrieves a request token from the Beatport API and returns the
        corresponding authorization URL on their end that the user has
        to visit.

        This is the first step of the initial authorization process with the
        API. Once the user has visited the URL, call
        :py:method:`get_access_token` with the displayed data to complete
        the process.

        :returns: Authorization URL for the user to visit
        :rtype: unicode
        """
        self.api.fetch_request_token(
            self._make_url('/identity/1/oauth/request-token'))
        return self.api.authorization_url(
            self._make_url('/identity/1/oauth/authorize'))

    def get_access_token(self, auth_data):
        """ Obtain the final access token and secret for the API.

        :param auth_data: URL-encoded authorization data as displayed at
                          the authorization url (obtained via
                          :py:meth:`get_authorize_url`) after signing in
        :type auth_data: unicode
        :returns: OAuth resource owner key and secret
        :rtype: (unicode, unicode) tuple
        """
        self.api.parse_authorization_response(
            "http://beets.io/auth?" + auth_data)
        access_data = self.api.fetch_access_token(
            self._make_url('/identity/1/oauth/access-token'))
        return access_data['oauth_token'], access_data['oauth_token_secret']

    def search(self, query, release_type='release', details=True):
        """ Perform a search of the Beatport catalogue.

        :param query: Query string
        :param release_type: Type of releases to search for, can be
                             'release' or 'track'
        :param details: Retrieve additional information about the
                        search results. Currently this will fetch
                        the tracklist for releases and do nothing for
                        tracks
        :returns: Search results
        :rtype: generator that yields
                py:class:`BeatportRelease` or
                :py:class:`BeatportTrack`
        """
        response = self._get('catalog/3/search',
                             query=query, perPage=5,
                             facets=['fieldType:{0}'.format(release_type)])
        for item in response:
            if release_type == 'release':
                if details:
                    release = self.get_release(item['id'])
                else:
                    release = BeatportRelease(item)
                yield release
            elif release_type == 'track':
                yield BeatportTrack(item)

    def get_release(self, beatport_id):
        """ Get information about a single release.

        :param beatport_id: Beatport ID of the release
        :returns: The matching release
        :rtype: :py:class:`BeatportRelease`
        """
        response = self._get('/catalog/3/releases', id=beatport_id)
        release = BeatportRelease(response[0])
        release.tracks = self.get_release_tracks(beatport_id)
        return release

    def get_release_tracks(self, beatport_id):
        """ Get all tracks for a given release.

        :param beatport_id: Beatport ID of the release
        :returns: Tracks in the matching release
        :rtype: list of :py:class:`BeatportTrack`
        """
        response = self._get('/catalog/3/tracks', releaseId=beatport_id,
                             perPage=100)
        return [BeatportTrack(t) for t in response]

    def get_track(self, beatport_id):
        """ Get information about a single track.

        :param beatport_id: Beatport ID of the track
        :returns: The matching track
        :rtype: :py:class:`BeatportTrack`
        """
        response = self._get('/catalog/3/tracks', id=beatport_id)
        return BeatportTrack(response[0])

    def _make_url(self, endpoint):
        """ Get complete URL for a given API endpoint. """
        if not endpoint.startswith('/'):
            endpoint = '/' + endpoint
        return self._api_base + endpoint

    def _get(self, endpoint, **kwargs):
        """ Perform a GET request on a given API endpoint.

        Automatically extracts result data from the response and converts HTTP
        exceptions into :py:class:`BeatportAPIError` objects.
        """
        try:
            response = self.api.get(self._make_url(endpoint), params=kwargs)
        except Exception as e:
            # Format the exception object itself: the previously-used
            # `e.message` attribute does not exist on Python 3 exceptions
            # and raised an AttributeError instead of the intended error.
            raise BeatportAPIError("Error connecting to Beatport API: {}"
                                   .format(e))
        if not response:
            raise BeatportAPIError(
                "Error {0.status_code} for '{0.request.path_url}"
                .format(response))
        return response.json()['results']
@six.python_2_unicode_compatible
class BeatportRelease(BeatportObject):
    """A release (album/EP/single) from the Beatport catalogue."""

    def __init__(self, data):
        BeatportObject.__init__(self, data)
        if 'catalogNumber' in data:
            self.catalog_number = data['catalogNumber']
        if 'label' in data:
            self.label_name = data['label']['name']
        if 'category' in data:
            self.category = data['category']
        if 'slug' in data:
            self.url = "http://beatport.com/release/{0}/{1}".format(
                data['slug'], data['id'])

    def __str__(self):
        # Releases with four or more artists are treated as compilations.
        if len(self.artists) < 4:
            names = ", ".join(artist[1] for artist in self.artists)
        else:
            names = "Various Artists"
        return u"<BeatportRelease: {0} - {1} ({2})>".format(
            names,
            self.name,
            self.catalog_number,
        )

    def __repr__(self):
        return six.text_type(self).encode('utf-8')
@six.python_2_unicode_compatible
class BeatportTrack(BeatportObject):
    """A single track from the Beatport catalogue."""

    def __init__(self, data):
        BeatportObject.__init__(self, data)
        if 'title' in data:
            self.title = six.text_type(data['title'])
        if 'mixName' in data:
            self.mix_name = six.text_type(data['mixName'])
        # Prefer the millisecond duration; fall back to a "M:S" string.
        self.length = timedelta(milliseconds=data.get('lengthMs', 0) or 0)
        if not self.length:
            try:
                minutes, seconds = data.get('length', '0:0').split(':')
                self.length = timedelta(minutes=int(minutes),
                                        seconds=int(seconds))
            except ValueError:
                pass
        if 'slug' in data:
            self.url = "http://beatport.com/track/{0}/{1}".format(data['slug'],
                                                                  data['id'])
        self.track_number = data.get('trackNumber')

    def __str__(self):
        names = ", ".join(artist[1] for artist in self.artists)
        return (u"<BeatportTrack: {0} - {1} ({2})>"
                .format(names, self.name, self.mix_name))

    def __repr__(self):
        return six.text_type(self).encode('utf-8')
class BeatportPlugin(BeetsPlugin):
    """Autotagger metadata source plugin that matches albums and tracks
    against the Beatport catalogue via its OAuth-protected API.
    """

    def __init__(self):
        super(BeatportPlugin, self).__init__()
        self.config.add({
            'apikey': '57713c3906af6f5def151b33601389176b37b429',
            'apisecret': 'b3fe08c93c80aefd749fe871a16cd2bb32e2b954',
            'tokenfile': 'beatport_token.json',
            'source_weight': 0.5,
        })
        # Keep the OAuth credentials out of plain-text config dumps.
        self.config['apikey'].redact = True
        self.config['apisecret'].redact = True
        self.client = None
        # Defer client construction/authentication until an import begins.
        self.register_listener('import_begin', self.setup)

    def setup(self, session=None):
        """Create the API client, reusing a stored OAuth token when one
        exists and running the interactive authentication flow otherwise.
        """
        c_key = self.config['apikey'].as_str()
        c_secret = self.config['apisecret'].as_str()

        # Get the OAuth token from a file or log in.
        try:
            with open(self._tokenfile()) as f:
                tokendata = json.load(f)
        except IOError:
            # No token yet. Generate one.
            token, secret = self.authenticate(c_key, c_secret)
        else:
            token = tokendata['token']
            secret = tokendata['secret']

        self.client = BeatportClient(c_key, c_secret, token, secret)

    def authenticate(self, c_key, c_secret):
        """Run the interactive OAuth authorization flow and return a
        ``(token, secret)`` pair, persisting it to the token file.

        Raises ``beets.ui.UserError`` when communication with Beatport
        fails at either step.
        """
        # Get the link for the OAuth page.
        auth_client = BeatportClient(c_key, c_secret)
        try:
            url = auth_client.get_authorize_url()
        except AUTH_ERRORS as e:
            self._log.debug(u'authentication error: {0}', e)
            raise beets.ui.UserError(u'communication with Beatport failed')

        beets.ui.print_(u"To authenticate with Beatport, visit:")
        beets.ui.print_(url)

        # Ask for the verifier data and validate it.
        data = beets.ui.input_(u"Enter the string displayed in your browser:")
        try:
            token, secret = auth_client.get_access_token(data)
        except AUTH_ERRORS as e:
            self._log.debug(u'authentication error: {0}', e)
            raise beets.ui.UserError(u'Beatport token request failed')

        # Save the token for later use.
        self._log.debug(u'Beatport token {0}, secret {1}', token, secret)
        with open(self._tokenfile(), 'w') as f:
            json.dump({'token': token, 'secret': secret}, f)

        return token, secret

    def _tokenfile(self):
        """Get the path to the JSON file for storing the OAuth token.
        """
        return self.config['tokenfile'].get(confit.Filename(in_app_dir=True))

    def album_distance(self, items, album_info, mapping):
        """Returns the beatport source weight and the maximum source weight
        for albums.
        """
        dist = Distance()
        if album_info.data_source == 'Beatport':
            dist.add('source', self.config['source_weight'].as_number())
        return dist

    def track_distance(self, item, track_info):
        """Returns the beatport source weight and the maximum source weight
        for individual tracks.
        """
        dist = Distance()
        if track_info.data_source == 'Beatport':
            dist.add('source', self.config['source_weight'].as_number())
        return dist

    def candidates(self, items, artist, release, va_likely):
        """Returns a list of AlbumInfo objects for beatport search results
        matching release and artist (if not various).
        """
        if va_likely:
            query = release
        else:
            query = '%s %s' % (artist, release)
        try:
            return self._get_releases(query)
        except BeatportAPIError as e:
            self._log.debug(u'API Error: {0} (query: {1})', e, query)
            return []

    def item_candidates(self, item, artist, title):
        """Returns a list of TrackInfo objects for beatport search results
        matching title and artist.
        """
        query = '%s %s' % (artist, title)
        try:
            return self._get_tracks(query)
        except BeatportAPIError as e:
            self._log.debug(u'API Error: {0} (query: {1})', e, query)
            return []

    def album_for_id(self, release_id):
        """Fetches a release by its Beatport ID and returns an AlbumInfo object
        or None if the release is not found.
        """
        self._log.debug(u'Searching for release {0}', release_id)
        # Accept either a bare numeric ID or a full release URL.
        match = re.search(r'(^|beatport\.com/release/.+/)(\d+)$', release_id)
        if not match:
            return None
        release = self.client.get_release(match.group(2))
        album = self._get_album_info(release)
        return album

    def track_for_id(self, track_id):
        """Fetches a track by its Beatport ID and returns a TrackInfo object
        or None if the track is not found.
        """
        self._log.debug(u'Searching for track {0}', track_id)
        # Accept either a bare numeric ID or a full track URL.
        match = re.search(r'(^|beatport\.com/track/.+/)(\d+)$', track_id)
        if not match:
            return None
        bp_track = self.client.get_track(match.group(2))
        track = self._get_track_info(bp_track)
        return track

    def _get_releases(self, query):
        """Returns a list of AlbumInfo objects for a beatport search query.
        """
        # Strip non-word characters from query. Things like "!" and "-" can
        # cause a query to return no results, even if they match the artist or
        # album title. Use `re.UNICODE` flag to avoid stripping non-english
        # word characters.
        query = re.sub(r'\W+', ' ', query, flags=re.UNICODE)
        # Strip medium information from query, Things like "CD1" and "disk 1"
        # can also negate an otherwise positive result.
        query = re.sub(r'\b(CD|disc)\s*\d+', '', query, flags=re.I)
        albums = [self._get_album_info(x)
                  for x in self.client.search(query)]
        return albums

    def _get_album_info(self, release):
        """Returns an AlbumInfo object for a Beatport Release object.
        """
        # Releases with more than three artists are treated as VA.
        va = len(release.artists) > 3
        artist, artist_id = self._get_artist(release.artists)
        if va:
            artist = u"Various Artists"
        tracks = [self._get_track_info(x) for x in release.tracks]

        return AlbumInfo(album=release.name, album_id=release.beatport_id,
                         artist=artist, artist_id=artist_id, tracks=tracks,
                         albumtype=release.category, va=va,
                         year=release.release_date.year,
                         month=release.release_date.month,
                         day=release.release_date.day,
                         label=release.label_name,
                         catalognum=release.catalog_number, media=u'Digital',
                         data_source=u'Beatport', data_url=release.url)

    def _get_track_info(self, track):
        """Returns a TrackInfo object for a Beatport Track object.
        """
        title = track.name
        # Append the mix name unless it is the default "Original Mix".
        if track.mix_name != u"Original Mix":
            title += u" ({0})".format(track.mix_name)
        artist, artist_id = self._get_artist(track.artists)
        length = track.length.total_seconds()
        return TrackInfo(title=title, track_id=track.beatport_id,
                         artist=artist, artist_id=artist_id,
                         length=length, index=track.track_number,
                         medium_index=track.track_number,
                         data_source=u'Beatport', data_url=track.url)

    def _get_artist(self, artists):
        """Returns an artist string (all artists) and an artist_id (the main
        artist) for a list of Beatport release or track artists.
        """
        # The first artist in the list supplies the artist_id.
        artist_id = None
        bits = []
        for artist in artists:
            if not artist_id:
                artist_id = artist[0]
            name = artist[1]
            # Strip disambiguation number.
            name = re.sub(r' \(\d+\)$', '', name)
            # Move articles to the front.
            name = re.sub(r'^(.*?), (a|an|the)$', r'\2 \1', name, flags=re.I)
            bits.append(name)
        artist = ', '.join(bits).replace(' ,', ',') or None
        return artist, artist_id

    def _get_tracks(self, query):
        """Returns a list of TrackInfo objects for a Beatport query.
        """
        bp_tracks = self.client.search(query, release_type='track')
        tracks = [self._get_track_info(x) for x in bp_tracks]
        return tracks

View file

@ -35,17 +35,18 @@ from beets.util import bluelet
from beets.library import Item from beets.library import Item
from beets import dbcore from beets import dbcore
from beets.mediafile import MediaFile from beets.mediafile import MediaFile
import six
PROTOCOL_VERSION = '0.13.0' PROTOCOL_VERSION = '0.13.0'
BUFSIZE = 1024 BUFSIZE = 1024
HELLO = 'OK MPD %s' % PROTOCOL_VERSION HELLO = u'OK MPD %s' % PROTOCOL_VERSION
CLIST_BEGIN = 'command_list_begin' CLIST_BEGIN = u'command_list_begin'
CLIST_VERBOSE_BEGIN = 'command_list_ok_begin' CLIST_VERBOSE_BEGIN = u'command_list_ok_begin'
CLIST_END = 'command_list_end' CLIST_END = u'command_list_end'
RESP_OK = 'OK' RESP_OK = u'OK'
RESP_CLIST_VERBOSE = 'list_OK' RESP_CLIST_VERBOSE = u'list_OK'
RESP_ERR = 'ACK' RESP_ERR = u'ACK'
NEWLINE = u"\n" NEWLINE = u"\n"
@ -305,12 +306,12 @@ class BaseServer(object):
playlist, playlistlength, and xfade. playlist, playlistlength, and xfade.
""" """
yield ( yield (
u'volume: ' + unicode(self.volume), u'volume: ' + six.text_type(self.volume),
u'repeat: ' + unicode(int(self.repeat)), u'repeat: ' + six.text_type(int(self.repeat)),
u'random: ' + unicode(int(self.random)), u'random: ' + six.text_type(int(self.random)),
u'playlist: ' + unicode(self.playlist_version), u'playlist: ' + six.text_type(self.playlist_version),
u'playlistlength: ' + unicode(len(self.playlist)), u'playlistlength: ' + six.text_type(len(self.playlist)),
u'xfade: ' + unicode(self.crossfade), u'xfade: ' + six.text_type(self.crossfade),
) )
if self.current_index == -1: if self.current_index == -1:
@ -323,8 +324,8 @@ class BaseServer(object):
if self.current_index != -1: # i.e., paused or playing if self.current_index != -1: # i.e., paused or playing
current_id = self._item_id(self.playlist[self.current_index]) current_id = self._item_id(self.playlist[self.current_index])
yield u'song: ' + unicode(self.current_index) yield u'song: ' + six.text_type(self.current_index)
yield u'songid: ' + unicode(current_id) yield u'songid: ' + six.text_type(current_id)
if self.error: if self.error:
yield u'error: ' + self.error yield u'error: ' + self.error
@ -468,8 +469,8 @@ class BaseServer(object):
Also a dummy implementation. Also a dummy implementation.
""" """
for idx, track in enumerate(self.playlist): for idx, track in enumerate(self.playlist):
yield u'cpos: ' + unicode(idx) yield u'cpos: ' + six.text_type(idx)
yield u'Id: ' + unicode(track.id) yield u'Id: ' + six.text_type(track.id)
def cmd_currentsong(self, conn): def cmd_currentsong(self, conn):
"""Sends information about the currently-playing song. """Sends information about the currently-playing song.
@ -569,12 +570,12 @@ class Connection(object):
added after every string. Returns a Bluelet event that sends added after every string. Returns a Bluelet event that sends
the data. the data.
""" """
if isinstance(lines, basestring): if isinstance(lines, six.string_types):
lines = [lines] lines = [lines]
out = NEWLINE.join(lines) + NEWLINE out = NEWLINE.join(lines) + NEWLINE
log.debug('{}', out[:-1]) # Don't log trailing newline. log.debug('{}', out[:-1]) # Don't log trailing newline.
if isinstance(out, unicode): if isinstance(out, six.text_type):
out = out.encode('utf8') out = out.encode('utf-8')
return self.sock.sendall(out) return self.sock.sendall(out)
def do_command(self, command): def do_command(self, command):
@ -603,7 +604,8 @@ class Connection(object):
line = line.strip() line = line.strip()
if not line: if not line:
break break
log.debug('{}', line) line = line.decode('utf8') # MPD protocol uses UTF-8.
log.debug(u'{}', line)
if clist is not None: if clist is not None:
# Command list already opened. # Command list already opened.
@ -639,8 +641,8 @@ class Command(object):
"""A command issued by the client for processing by the server. """A command issued by the client for processing by the server.
""" """
command_re = re.compile(br'^([^ \t]+)[ \t]*') command_re = re.compile(r'^([^ \t]+)[ \t]*')
arg_re = re.compile(br'"((?:\\"|[^"])+)"|([^ \t"]+)') arg_re = re.compile(r'"((?:\\"|[^"])+)"|([^ \t"]+)')
def __init__(self, s): def __init__(self, s):
"""Creates a new `Command` from the given string, `s`, parsing """Creates a new `Command` from the given string, `s`, parsing
@ -655,11 +657,10 @@ class Command(object):
if match[0]: if match[0]:
# Quoted argument. # Quoted argument.
arg = match[0] arg = match[0]
arg = arg.replace(b'\\"', b'"').replace(b'\\\\', b'\\') arg = arg.replace(u'\\"', u'"').replace(u'\\\\', u'\\')
else: else:
# Unquoted argument. # Unquoted argument.
arg = match[1] arg = match[1]
arg = arg.decode('utf8')
self.args.append(arg) self.args.append(arg)
def run(self, conn): def run(self, conn):
@ -771,28 +772,28 @@ class Server(BaseServer):
def _item_info(self, item): def _item_info(self, item):
info_lines = [ info_lines = [
u'file: ' + item.destination(fragment=True), u'file: ' + item.destination(fragment=True),
u'Time: ' + unicode(int(item.length)), u'Time: ' + six.text_type(int(item.length)),
u'Title: ' + item.title, u'Title: ' + item.title,
u'Artist: ' + item.artist, u'Artist: ' + item.artist,
u'Album: ' + item.album, u'Album: ' + item.album,
u'Genre: ' + item.genre, u'Genre: ' + item.genre,
] ]
track = unicode(item.track) track = six.text_type(item.track)
if item.tracktotal: if item.tracktotal:
track += u'/' + unicode(item.tracktotal) track += u'/' + six.text_type(item.tracktotal)
info_lines.append(u'Track: ' + track) info_lines.append(u'Track: ' + track)
info_lines.append(u'Date: ' + unicode(item.year)) info_lines.append(u'Date: ' + six.text_type(item.year))
try: try:
pos = self._id_to_index(item.id) pos = self._id_to_index(item.id)
info_lines.append(u'Pos: ' + unicode(pos)) info_lines.append(u'Pos: ' + six.text_type(pos))
except ArgumentNotFoundError: except ArgumentNotFoundError:
# Don't include position if not in playlist. # Don't include position if not in playlist.
pass pass
info_lines.append(u'Id: ' + unicode(item.id)) info_lines.append(u'Id: ' + six.text_type(item.id))
return info_lines return info_lines
@ -852,7 +853,7 @@ class Server(BaseServer):
for name, itemid in iter(sorted(node.files.items())): for name, itemid in iter(sorted(node.files.items())):
item = self.lib.get_item(itemid) item = self.lib.get_item(itemid)
yield self._item_info(item) yield self._item_info(item)
for name, _ in iter(sorted(node.dirs.iteritems())): for name, _ in iter(sorted(node.dirs.items())):
dirpath = self._path_join(path, name) dirpath = self._path_join(path, name)
if dirpath.startswith(u"/"): if dirpath.startswith(u"/"):
# Strip leading slash (libmpc rejects this). # Strip leading slash (libmpc rejects this).
@ -872,12 +873,12 @@ class Server(BaseServer):
yield u'file: ' + basepath yield u'file: ' + basepath
else: else:
# List a directory. Recurse into both directories and files. # List a directory. Recurse into both directories and files.
for name, itemid in sorted(node.files.iteritems()): for name, itemid in sorted(node.files.items()):
newpath = self._path_join(basepath, name) newpath = self._path_join(basepath, name)
# "yield from" # "yield from"
for v in self._listall(newpath, itemid, info): for v in self._listall(newpath, itemid, info):
yield v yield v
for name, subdir in sorted(node.dirs.iteritems()): for name, subdir in sorted(node.dirs.items()):
newpath = self._path_join(basepath, name) newpath = self._path_join(basepath, name)
yield u'directory: ' + newpath yield u'directory: ' + newpath
for v in self._listall(newpath, subdir, info): for v in self._listall(newpath, subdir, info):
@ -902,11 +903,11 @@ class Server(BaseServer):
yield self.lib.get_item(node) yield self.lib.get_item(node)
else: else:
# Recurse into a directory. # Recurse into a directory.
for name, itemid in sorted(node.files.iteritems()): for name, itemid in sorted(node.files.items()):
# "yield from" # "yield from"
for v in self._all_items(itemid): for v in self._all_items(itemid):
yield v yield v
for name, subdir in sorted(node.dirs.iteritems()): for name, subdir in sorted(node.dirs.items()):
for v in self._all_items(subdir): for v in self._all_items(subdir):
yield v yield v
@ -917,7 +918,7 @@ class Server(BaseServer):
for item in self._all_items(self._resolve_path(path)): for item in self._all_items(self._resolve_path(path)):
self.playlist.append(item) self.playlist.append(item)
if send_id: if send_id:
yield u'Id: ' + unicode(item.id) yield u'Id: ' + six.text_type(item.id)
self.playlist_version += 1 self.playlist_version += 1
def cmd_add(self, conn, path): def cmd_add(self, conn, path):
@ -938,11 +939,11 @@ class Server(BaseServer):
if self.current_index > -1: if self.current_index > -1:
item = self.playlist[self.current_index] item = self.playlist[self.current_index]
yield u'bitrate: ' + unicode(item.bitrate / 1000) yield u'bitrate: ' + six.text_type(item.bitrate / 1000)
# Missing 'audio'. # Missing 'audio'.
(pos, total) = self.player.time() (pos, total) = self.player.time()
yield u'time: ' + unicode(pos) + u':' + unicode(total) yield u'time: ' + six.text_type(pos) + u':' + six.text_type(total)
# Also missing 'updating_db'. # Also missing 'updating_db'.
@ -957,13 +958,13 @@ class Server(BaseServer):
artists, albums, songs, totaltime = tx.query(statement)[0] artists, albums, songs, totaltime = tx.query(statement)[0]
yield ( yield (
u'artists: ' + unicode(artists), u'artists: ' + six.text_type(artists),
u'albums: ' + unicode(albums), u'albums: ' + six.text_type(albums),
u'songs: ' + unicode(songs), u'songs: ' + six.text_type(songs),
u'uptime: ' + unicode(int(time.time() - self.startup_time)), u'uptime: ' + six.text_type(int(time.time() - self.startup_time)),
u'playtime: ' + u'0', # Missing. u'playtime: ' + u'0', # Missing.
u'db_playtime: ' + unicode(int(totaltime)), u'db_playtime: ' + six.text_type(int(totaltime)),
u'db_update: ' + unicode(int(self.updated_time)), u'db_update: ' + six.text_type(int(self.updated_time)),
) )
# Searching. # Searching.
@ -1059,7 +1060,7 @@ class Server(BaseServer):
rows = tx.query(statement, subvals) rows = tx.query(statement, subvals)
for row in rows: for row in rows:
yield show_tag_canon + u': ' + unicode(row[0]) yield show_tag_canon + u': ' + six.text_type(row[0])
def cmd_count(self, conn, tag, value): def cmd_count(self, conn, tag, value):
"""Returns the number and total time of songs matching the """Returns the number and total time of songs matching the
@ -1071,8 +1072,8 @@ class Server(BaseServer):
for item in self.lib.items(dbcore.query.MatchQuery(key, value)): for item in self.lib.items(dbcore.query.MatchQuery(key, value)):
songs += 1 songs += 1
playtime += item.length playtime += item.length
yield u'songs: ' + unicode(songs) yield u'songs: ' + six.text_type(songs)
yield u'playtime: ' + unicode(int(playtime)) yield u'playtime: ' + six.text_type(int(playtime))
# "Outputs." Just a dummy implementation because we don't control # "Outputs." Just a dummy implementation because we don't control
# any outputs. # any outputs.
@ -1167,7 +1168,7 @@ class BPDPlugin(BeetsPlugin):
server.run() server.run()
except NoGstreamerError: except NoGstreamerError:
global_log.error(u'Gstreamer Python bindings not found.') global_log.error(u'Gstreamer Python bindings not found.')
global_log.error(u'Install "python-gst0.10", "py27-gst-python", ' global_log.error(u'Install "gstreamer1.0" and "python-gi"'
u'or similar package to use BPD.') u'or similar package to use BPD.')
def commands(self): def commands(self):
@ -1180,11 +1181,12 @@ class BPDPlugin(BeetsPlugin):
) )
def func(lib, opts, args): def func(lib, opts, args):
host = args.pop(0) if args else self.config['host'].get(unicode) host = self.config['host'].as_str()
host = args.pop(0) if args else host
port = args.pop(0) if args else self.config['port'].get(int) port = args.pop(0) if args else self.config['port'].get(int)
if args: if args:
raise beets.ui.UserError(u'too many arguments') raise beets.ui.UserError(u'too many arguments')
password = self.config['password'].get(unicode) password = self.config['password'].as_str()
volume = self.config['volume'].get(int) volume = self.config['volume'].get(int)
debug = opts.debug or False debug = opts.debug or False
self.start_bpd(lib, host, int(port), password, volume, debug) self.start_bpd(lib, host, int(port), password, volume, debug)

View file

@ -19,17 +19,25 @@ music player.
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
import six
import sys import sys
import time import time
import gobject from six.moves import _thread
import thread
import os import os
import copy import copy
import urllib from six.moves import urllib
from beets import ui
import pygst import gi
pygst.require('0.10') gi.require_version('Gst', '1.0')
import gst # noqa from gi.repository import GLib, Gst # noqa: E402
Gst.init(None)
class QueryError(Exception):
pass
class GstPlayer(object): class GstPlayer(object):
@ -57,8 +65,19 @@ class GstPlayer(object):
# Set up the Gstreamer player. From the pygst tutorial: # Set up the Gstreamer player. From the pygst tutorial:
# http://pygstdocs.berlios.de/pygst-tutorial/playbin.html # http://pygstdocs.berlios.de/pygst-tutorial/playbin.html
self.player = gst.element_factory_make("playbin2", "player") ####
fakesink = gst.element_factory_make("fakesink", "fakesink") # Updated to GStreamer 1.0 with:
# https://wiki.ubuntu.com/Novacut/GStreamer1.0
self.player = Gst.ElementFactory.make("playbin", "player")
if self.player is None:
raise ui.UserError("Could not create playbin")
fakesink = Gst.ElementFactory.make("fakesink", "fakesink")
if fakesink is None:
raise ui.UserError("Could not create fakesink")
self.player.set_property("video-sink", fakesink) self.player.set_property("video-sink", fakesink)
bus = self.player.get_bus() bus = self.player.get_bus()
bus.add_signal_watch() bus.add_signal_watch()
@ -74,21 +93,21 @@ class GstPlayer(object):
"""Returns the current state flag of the playbin.""" """Returns the current state flag of the playbin."""
# gst's get_state function returns a 3-tuple; we just want the # gst's get_state function returns a 3-tuple; we just want the
# status flag in position 1. # status flag in position 1.
return self.player.get_state()[1] return self.player.get_state(Gst.CLOCK_TIME_NONE)[1]
def _handle_message(self, bus, message): def _handle_message(self, bus, message):
"""Callback for status updates from GStreamer.""" """Callback for status updates from GStreamer."""
if message.type == gst.MESSAGE_EOS: if message.type == Gst.MessageType.EOS:
# file finished playing # file finished playing
self.player.set_state(gst.STATE_NULL) self.player.set_state(Gst.State.NULL)
self.playing = False self.playing = False
self.cached_time = None self.cached_time = None
if self.finished_callback: if self.finished_callback:
self.finished_callback() self.finished_callback()
elif message.type == gst.MESSAGE_ERROR: elif message.type == Gst.MessageType.ERROR:
# error # error
self.player.set_state(gst.STATE_NULL) self.player.set_state(Gst.State.NULL)
err, debug = message.parse_error() err, debug = message.parse_error()
print(u"Error: {0}".format(err)) print(u"Error: {0}".format(err))
self.playing = False self.playing = False
@ -109,27 +128,27 @@ class GstPlayer(object):
"""Immediately begin playing the audio file at the given """Immediately begin playing the audio file at the given
path. path.
""" """
self.player.set_state(gst.STATE_NULL) self.player.set_state(Gst.State.NULL)
if isinstance(path, unicode): if isinstance(path, six.text_type):
path = path.encode('utf8') path = path.encode('utf-8')
uri = 'file://' + urllib.quote(path) uri = 'file://' + urllib.parse.quote(path)
self.player.set_property("uri", uri) self.player.set_property("uri", uri)
self.player.set_state(gst.STATE_PLAYING) self.player.set_state(Gst.State.PLAYING)
self.playing = True self.playing = True
def play(self): def play(self):
"""If paused, resume playback.""" """If paused, resume playback."""
if self._get_state() == gst.STATE_PAUSED: if self._get_state() == Gst.State.PAUSED:
self.player.set_state(gst.STATE_PLAYING) self.player.set_state(Gst.State.PLAYING)
self.playing = True self.playing = True
def pause(self): def pause(self):
"""Pause playback.""" """Pause playback."""
self.player.set_state(gst.STATE_PAUSED) self.player.set_state(Gst.State.PAUSED)
def stop(self): def stop(self):
"""Halt playback.""" """Halt playback."""
self.player.set_state(gst.STATE_NULL) self.player.set_state(Gst.State.NULL)
self.playing = False self.playing = False
self.cached_time = None self.cached_time = None
@ -139,27 +158,36 @@ class GstPlayer(object):
Call this function before trying to play any music with Call this function before trying to play any music with
play_file() or play(). play_file() or play().
""" """
# If we don't use the MainLoop, messages are never sent. # If we don't use the MainLoop, messages are never sent.
gobject.threads_init()
def start(): def start():
loop = gobject.MainLoop() loop = GLib.MainLoop()
loop.run() loop.run()
thread.start_new_thread(start, ())
_thread.start_new_thread(start, ())
def time(self): def time(self):
"""Returns a tuple containing (position, length) where both """Returns a tuple containing (position, length) where both
values are integers in seconds. If no stream is available, values are integers in seconds. If no stream is available,
returns (0, 0). returns (0, 0).
""" """
fmt = gst.Format(gst.FORMAT_TIME) fmt = Gst.Format(Gst.Format.TIME)
try: try:
pos = self.player.query_position(fmt, None)[0] / (10 ** 9) posq = self.player.query_position(fmt)
length = self.player.query_duration(fmt, None)[0] / (10 ** 9) if not posq[0]:
raise QueryError("query_position failed")
pos = posq[1] // (10 ** 9)
lengthq = self.player.query_duration(fmt)
if not lengthq[0]:
raise QueryError("query_duration failed")
length = lengthq[1] // (10 ** 9)
self.cached_time = (pos, length) self.cached_time = (pos, length)
return (pos, length) return (pos, length)
except gst.QueryError: except QueryError:
# Stream not ready. For small gaps of time, for instance # Stream not ready. For small gaps of time, for instance
# after seeking, the time values are unavailable. For this # after seeking, the time values are unavailable. For this
# reason, we cache recent. # reason, we cache recent.
@ -175,9 +203,9 @@ class GstPlayer(object):
self.stop() self.stop()
return return
fmt = gst.Format(gst.FORMAT_TIME) fmt = Gst.Format(Gst.Format.TIME)
ns = position * 10 ** 9 # convert to nanoseconds ns = position * 10 ** 9 # convert to nanoseconds
self.player.seek_simple(fmt, gst.SEEK_FLAG_FLUSH, ns) self.player.seek_simple(fmt, Gst.SeekFlags.FLUSH, ns)
# save new cached time # save new cached time
self.cached_time = (position, cur_len) self.cached_time = (position, cur_len)
@ -208,12 +236,14 @@ def play_complicated(paths):
def next_song(): def next_song():
my_paths.pop(0) my_paths.pop(0)
p.play_file(my_paths[0]) p.play_file(my_paths[0])
p = GstPlayer(next_song) p = GstPlayer(next_song)
p.run() p.run()
p.play_file(my_paths[0]) p.play_file(my_paths[0])
while my_paths: while my_paths:
time.sleep(1) time.sleep(1)
if __name__ == '__main__': if __name__ == '__main__':
# A very simple command-line player. Just give it names of audio # A very simple command-line player. Just give it names of audio
# files on the command line; these are all played in sequence. # files on the command line; these are all played in sequence.

View file

@ -18,6 +18,7 @@
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
import time import time
from six.moves import input
from beets import ui from beets import ui
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
@ -31,7 +32,7 @@ def bpm(max_strokes):
dt = [] dt = []
for i in range(max_strokes): for i in range(max_strokes):
# Press enter to the rhythm... # Press enter to the rhythm...
s = raw_input() s = input()
if s == '': if s == '':
t1 = time.time() t1 = time.time()
# Only start measuring at the second stroke # Only start measuring at the second stroke
@ -64,7 +65,9 @@ class BPMPlugin(BeetsPlugin):
return [cmd] return [cmd]
def command(self, lib, opts, args): def command(self, lib, opts, args):
self.get_bpm(lib.items(ui.decargs(args))) items = lib.items(ui.decargs(args))
write = ui.should_write()
self.get_bpm(items, write)
def get_bpm(self, items, write=False): def get_bpm(self, items, write=False):
overwrite = self.config['overwrite'].get(bool) overwrite = self.config['overwrite'].get(bool)

View file

@ -21,7 +21,8 @@ from __future__ import division, absolute_import, print_function
from datetime import datetime from datetime import datetime
import re import re
import string import string
from itertools import tee, izip from six.moves import zip
from itertools import tee
from beets import plugins, ui from beets import plugins, ui
@ -37,7 +38,7 @@ def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..." "s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable) a, b = tee(iterable)
next(b, None) next(b, None)
return izip(a, b) return zip(a, b)
def span_from_str(span_str): def span_from_str(span_str):
@ -137,9 +138,10 @@ def str2fmt(s):
def format_span(fmt, yearfrom, yearto, fromnchars, tonchars): def format_span(fmt, yearfrom, yearto, fromnchars, tonchars):
"""Return a span string representation. """Return a span string representation.
""" """
args = (bytes(yearfrom)[-fromnchars:]) args = (str(yearfrom)[-fromnchars:])
if tonchars: if tonchars:
args = (bytes(yearfrom)[-fromnchars:], bytes(yearto)[-tonchars:]) args = (str(yearfrom)[-fromnchars:], str(yearto)[-tonchars:])
return fmt % args return fmt % args

View file

@ -121,7 +121,7 @@ def _all_releases(items):
for release_id in release_ids: for release_id in release_ids:
relcounts[release_id] += 1 relcounts[release_id] += 1
for release_id, count in relcounts.iteritems(): for release_id, count in relcounts.items():
if float(count) / len(items) > COMMON_REL_THRESH: if float(count) / len(items) > COMMON_REL_THRESH:
yield release_id yield release_id
@ -181,7 +181,7 @@ class AcoustidPlugin(plugins.BeetsPlugin):
def submit_cmd_func(lib, opts, args): def submit_cmd_func(lib, opts, args):
try: try:
apikey = config['acoustid']['apikey'].get(unicode) apikey = config['acoustid']['apikey'].as_str()
except confit.NotFoundError: except confit.NotFoundError:
raise ui.UserError(u'no Acoustid user API key provided') raise ui.UserError(u'no Acoustid user API key provided')
submit_items(self._log, apikey, lib.items(ui.decargs(args))) submit_items(self._log, apikey, lib.items(ui.decargs(args)))
@ -236,7 +236,7 @@ def submit_items(log, userkey, items, chunksize=64):
try: try:
acoustid.submit(API_KEY, userkey, data) acoustid.submit(API_KEY, userkey, data)
except acoustid.AcoustidError as exc: except acoustid.AcoustidError as exc:
log.warn(u'acoustid submission error: {0}', exc) log.warning(u'acoustid submission error: {0}', exc)
del data[:] del data[:]
for item in items: for item in items:
@ -295,7 +295,7 @@ def fingerprint_item(log, item, write=False):
log.info(u'{0}: fingerprinting', log.info(u'{0}: fingerprinting',
util.displayable_path(item.path)) util.displayable_path(item.path))
try: try:
_, fp = acoustid.fingerprint_file(item.path) _, fp = acoustid.fingerprint_file(util.syspath(item.path))
item.acoustid_fingerprint = fp item.acoustid_fingerprint = fp
if write: if write:
log.info(u'{0}: writing fingerprint', log.info(u'{0}: writing fingerprint',

View file

@ -22,13 +22,17 @@ import threading
import subprocess import subprocess
import tempfile import tempfile
import shlex import shlex
import six
from string import Template from string import Template
import platform
from beets import ui, util, plugins, config from beets import ui, util, plugins, config
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
from beets.util.confit import ConfigTypeError from beets.util.confit import ConfigTypeError
from beets import art from beets import art
from beets.util.artresizer import ArtResizer from beets.util.artresizer import ArtResizer
from beets.library import parse_query_string
from beets.library import Item
_fs_lock = threading.Lock() _fs_lock = threading.Lock()
_temp_files = [] # Keep track of temporary transcoded files for deletion. _temp_files = [] # Keep track of temporary transcoded files for deletion.
@ -47,14 +51,15 @@ def replace_ext(path, ext):
The new extension must not contain a leading dot. The new extension must not contain a leading dot.
""" """
return os.path.splitext(path)[0] + b'.' + ext ext_dot = b'.' + ext
return os.path.splitext(path)[0] + ext_dot
def get_format(fmt=None): def get_format(fmt=None):
"""Return the command template and the extension from the config. """Return the command template and the extension from the config.
""" """
if not fmt: if not fmt:
fmt = config['convert']['format'].get(unicode).lower() fmt = config['convert']['format'].as_str().lower()
fmt = ALIASES.get(fmt, fmt) fmt = ALIASES.get(fmt, fmt)
try: try:
@ -67,28 +72,34 @@ def get_format(fmt=None):
.format(fmt) .format(fmt)
) )
except ConfigTypeError: except ConfigTypeError:
command = config['convert']['formats'][fmt].get(bytes) command = config['convert']['formats'][fmt].get(str)
extension = fmt extension = fmt
# Convenience and backwards-compatibility shortcuts. # Convenience and backwards-compatibility shortcuts.
keys = config['convert'].keys() keys = config['convert'].keys()
if 'command' in keys: if 'command' in keys:
command = config['convert']['command'].get(unicode) command = config['convert']['command'].as_str()
elif 'opts' in keys: elif 'opts' in keys:
# Undocumented option for backwards compatibility with < 1.3.1. # Undocumented option for backwards compatibility with < 1.3.1.
command = u'ffmpeg -i $source -y {0} $dest'.format( command = u'ffmpeg -i $source -y {0} $dest'.format(
config['convert']['opts'].get(unicode) config['convert']['opts'].as_str()
) )
if 'extension' in keys: if 'extension' in keys:
extension = config['convert']['extension'].get(unicode) extension = config['convert']['extension'].as_str()
return (command.encode('utf8'), extension.encode('utf8')) return (command.encode('utf-8'), extension.encode('utf-8'))
def should_transcode(item, fmt): def should_transcode(item, fmt):
"""Determine whether the item should be transcoded as part of """Determine whether the item should be transcoded as part of
conversion (i.e., its bitrate is high or it has the wrong format). conversion (i.e., its bitrate is high or it has the wrong format).
""" """
no_convert_queries = config['convert']['no_convert'].as_str_seq()
if no_convert_queries:
for query_string in no_convert_queries:
query, _ = parse_query_string(query_string, Item)
if query.match(item):
return False
if config['convert']['never_convert_lossy_files'] and \ if config['convert']['never_convert_lossy_files'] and \
not (item.format.lower() in LOSSLESS_FORMATS): not (item.format.lower() in LOSSLESS_FORMATS):
return False return False
@ -107,8 +118,8 @@ class ConvertPlugin(BeetsPlugin):
u'format': u'mp3', u'format': u'mp3',
u'formats': { u'formats': {
u'aac': { u'aac': {
u'command': u'ffmpeg -i $source -y -vn -acodec libfaac ' u'command': u'ffmpeg -i $source -y -vn -acodec aac '
u'-aq 100 $dest', u'-aq 1 $dest',
u'extension': u'm4a', u'extension': u'm4a',
}, },
u'alac': { u'alac': {
@ -130,11 +141,12 @@ class ConvertPlugin(BeetsPlugin):
u'quiet': False, u'quiet': False,
u'embed': True, u'embed': True,
u'paths': {}, u'paths': {},
u'no_convert': u'',
u'never_convert_lossy_files': False, u'never_convert_lossy_files': False,
u'copy_album_art': False, u'copy_album_art': False,
u'album_art_maxwidth': 0, u'album_art_maxwidth': 0,
}) })
self.import_stages = [self.auto_convert] self.early_import_stages = [self.auto_convert]
self.register_listener('import_task_files', self._cleanup) self.register_listener('import_task_files', self._cleanup)
@ -181,27 +193,48 @@ class ConvertPlugin(BeetsPlugin):
if not quiet and not pretend: if not quiet and not pretend:
self._log.info(u'Encoding {0}', util.displayable_path(source)) self._log.info(u'Encoding {0}', util.displayable_path(source))
# On Python 3, we need to construct the command to invoke as a
# Unicode string. On Unix, this is a little unfortunate---the OS is
# expecting bytes---so we use surrogate escaping and decode with the
# argument encoding, which is the same encoding that will then be
# *reversed* to recover the same bytes before invoking the OS. On
# Windows, we want to preserve the Unicode filename "as is."
if not six.PY2:
command = command.decode(util.arg_encoding(), 'surrogateescape')
if platform.system() == 'Windows':
source = source.decode(util._fsencoding())
dest = dest.decode(util._fsencoding())
else:
source = source.decode(util.arg_encoding(), 'surrogateescape')
dest = dest.decode(util.arg_encoding(), 'surrogateescape')
# Substitute $source and $dest in the argument list. # Substitute $source and $dest in the argument list.
args = shlex.split(command) args = shlex.split(command)
encode_cmd = []
for i, arg in enumerate(args): for i, arg in enumerate(args):
args[i] = Template(arg).safe_substitute({ args[i] = Template(arg).safe_substitute({
'source': source, 'source': source,
'dest': dest, 'dest': dest,
}) })
if six.PY2:
encode_cmd.append(args[i])
else:
encode_cmd.append(args[i].encode(util.arg_encoding()))
if pretend: if pretend:
self._log.info(u' '.join(ui.decargs(args))) self._log.info(u'{0}', u' '.join(ui.decargs(args)))
return return
try: try:
util.command_output(args) util.command_output(encode_cmd)
except subprocess.CalledProcessError as exc: except subprocess.CalledProcessError as exc:
# Something went wrong (probably Ctrl+C), remove temporary files # Something went wrong (probably Ctrl+C), remove temporary files
self._log.info(u'Encoding {0} failed. Cleaning up...', self._log.info(u'Encoding {0} failed. Cleaning up...',
util.displayable_path(source)) util.displayable_path(source))
self._log.debug(u'Command {0} exited with status {1}', self._log.debug(u'Command {0} exited with status {1}: {2}',
exc.cmd.decode('utf8', 'ignore'), args,
exc.returncode) exc.returncode,
exc.output)
util.remove(dest) util.remove(dest)
util.prune_dirs(os.path.dirname(dest)) util.prune_dirs(os.path.dirname(dest))
raise raise
@ -218,6 +251,9 @@ class ConvertPlugin(BeetsPlugin):
def convert_item(self, dest_dir, keep_new, path_formats, fmt, def convert_item(self, dest_dir, keep_new, path_formats, fmt,
pretend=False): pretend=False):
"""A pipeline thread that converts `Item` objects from a
library.
"""
command, ext = get_format(fmt) command, ext = get_format(fmt)
item, original, converted = None, None, None item, original, converted = None, None, None
while True: while True:
@ -369,61 +405,66 @@ class ConvertPlugin(BeetsPlugin):
util.copy(album.artpath, dest) util.copy(album.artpath, dest)
def convert_func(self, lib, opts, args): def convert_func(self, lib, opts, args):
if not opts.dest: dest = opts.dest or self.config['dest'].get()
opts.dest = self.config['dest'].get() if not dest:
if not opts.dest:
raise ui.UserError(u'no convert destination set') raise ui.UserError(u'no convert destination set')
opts.dest = util.bytestring_path(opts.dest) dest = util.bytestring_path(dest)
if not opts.threads: threads = opts.threads or self.config['threads'].get(int)
opts.threads = self.config['threads'].get(int)
if self.config['paths']: path_formats = ui.get_path_formats(self.config['paths'] or None)
path_formats = ui.get_path_formats(self.config['paths'])
fmt = opts.format or self.config['format'].as_str().lower()
if opts.pretend is not None:
pretend = opts.pretend
else: else:
path_formats = ui.get_path_formats() pretend = self.config['pretend'].get(bool)
if not opts.format:
opts.format = self.config['format'].get(unicode).lower()
pretend = opts.pretend if opts.pretend is not None else \
self.config['pretend'].get(bool)
if not pretend:
ui.commands.list_items(lib, ui.decargs(args), opts.album)
if not (opts.yes or ui.input_yn(u"Convert? (Y/n)")):
return
if opts.album: if opts.album:
albums = lib.albums(ui.decargs(args)) albums = lib.albums(ui.decargs(args))
items = (i for a in albums for i in a.items()) items = [i for a in albums for i in a.items()]
if self.config['copy_album_art']: if not pretend:
for album in albums: for a in albums:
self.copy_album_art(album, opts.dest, path_formats, ui.print_(format(a, u''))
pretend)
else: else:
items = iter(lib.items(ui.decargs(args))) items = list(lib.items(ui.decargs(args)))
convert = [self.convert_item(opts.dest, if not pretend:
for i in items:
ui.print_(format(i, u''))
if not items:
self._log.error(u'Empty query result.')
return
if not (pretend or opts.yes or ui.input_yn(u"Convert? (Y/n)")):
return
if opts.album and self.config['copy_album_art']:
for album in albums:
self.copy_album_art(album, dest, path_formats, pretend)
convert = [self.convert_item(dest,
opts.keep_new, opts.keep_new,
path_formats, path_formats,
opts.format, fmt,
pretend) pretend)
for _ in range(opts.threads)] for _ in range(threads)]
pipe = util.pipeline.Pipeline([items, convert]) pipe = util.pipeline.Pipeline([iter(items), convert])
pipe.run_parallel() pipe.run_parallel()
def convert_on_import(self, lib, item): def convert_on_import(self, lib, item):
"""Transcode a file automatically after it is imported into the """Transcode a file automatically after it is imported into the
library. library.
""" """
fmt = self.config['format'].get(unicode).lower() fmt = self.config['format'].as_str().lower()
if should_transcode(item, fmt): if should_transcode(item, fmt):
command, ext = get_format() command, ext = get_format()
# Create a temporary file for the conversion. # Create a temporary file for the conversion.
tmpdir = self.config['tmpdir'].get() tmpdir = self.config['tmpdir'].get()
fd, dest = tempfile.mkstemp('.' + ext, dir=tmpdir) if tmpdir:
tmpdir = util.py3_path(util.bytestring_path(tmpdir))
fd, dest = tempfile.mkstemp(util.py3_path(b'.' + ext), dir=tmpdir)
os.close(fd) os.close(fd)
dest = util.bytestring_path(dest) dest = util.bytestring_path(dest)
_temp_files.append(dest) # Delete the transcode later. _temp_files.append(dest) # Delete the transcode later.

View file

@ -35,7 +35,7 @@ class CuePlugin(BeetsPlugin):
return return
if len(cues) > 1: if len(cues) > 1:
self._log.info(u"Found multiple cue files doing nothing: {0}", self._log.info(u"Found multiple cue files doing nothing: {0}",
map(displayable_path, cues)) list(map(displayable_path, cues)))
cue_file = cues[0] cue_file = cues[0]
self._log.info("Found {} for {}", displayable_path(cue_file), item) self._log.info("Found {} for {}", displayable_path(cue_file), item)

View file

@ -19,31 +19,28 @@ discogs-client library.
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
import beets.ui import beets.ui
from beets import logging
from beets import config from beets import config
from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
from beets.util import confit from beets.util import confit
from discogs_client import Release, Client from discogs_client import Release, Master, Client
from discogs_client.exceptions import DiscogsAPIError from discogs_client.exceptions import DiscogsAPIError
from requests.exceptions import ConnectionError from requests.exceptions import ConnectionError
from six.moves import http_client
import beets import beets
import re import re
import time import time
import json import json
import socket import socket
import httplib
import os import os
import traceback
from string import ascii_lowercase
# Silence spurious INFO log lines generated by urllib3.
urllib3_logger = logging.getLogger('requests.packages.urllib3')
urllib3_logger.setLevel(logging.CRITICAL)
USER_AGENT = u'beets/{0} +http://beets.io/'.format(beets.__version__) USER_AGENT = u'beets/{0} +http://beets.io/'.format(beets.__version__)
# Exceptions that discogs_client should really handle but does not. # Exceptions that discogs_client should really handle but does not.
CONNECTION_ERRORS = (ConnectionError, socket.error, httplib.HTTPException, CONNECTION_ERRORS = (ConnectionError, socket.error, http_client.HTTPException,
ValueError, # JSON decoding raises a ValueError. ValueError, # JSON decoding raises a ValueError.
DiscogsAPIError) DiscogsAPIError)
@ -57,17 +54,25 @@ class DiscogsPlugin(BeetsPlugin):
'apisecret': 'plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy', 'apisecret': 'plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy',
'tokenfile': 'discogs_token.json', 'tokenfile': 'discogs_token.json',
'source_weight': 0.5, 'source_weight': 0.5,
'user_token': '',
}) })
self.config['apikey'].redact = True self.config['apikey'].redact = True
self.config['apisecret'].redact = True self.config['apisecret'].redact = True
self.config['user_token'].redact = True
self.discogs_client = None self.discogs_client = None
self.register_listener('import_begin', self.setup) self.register_listener('import_begin', self.setup)
def setup(self, session=None): def setup(self, session=None):
"""Create the `discogs_client` field. Authenticate if necessary. """Create the `discogs_client` field. Authenticate if necessary.
""" """
c_key = self.config['apikey'].get(unicode) c_key = self.config['apikey'].as_str()
c_secret = self.config['apisecret'].get(unicode) c_secret = self.config['apisecret'].as_str()
# Try using a configured user token (bypassing OAuth login).
user_token = self.config['user_token'].as_str()
if user_token:
self.discogs_client = Client(USER_AGENT, user_token=user_token)
return
# Get the OAuth token from a file or log in. # Get the OAuth token from a file or log in.
try: try:
@ -84,7 +89,7 @@ class DiscogsPlugin(BeetsPlugin):
token, secret) token, secret)
def reset_auth(self): def reset_auth(self):
"""Delete toke file & redo the auth steps. """Delete token file & redo the auth steps.
""" """
os.remove(self._tokenfile()) os.remove(self._tokenfile())
self.setup() self.setup()
@ -194,13 +199,13 @@ class DiscogsPlugin(BeetsPlugin):
# cause a query to return no results, even if they match the artist or # cause a query to return no results, even if they match the artist or
# album title. Use `re.UNICODE` flag to avoid stripping non-english # album title. Use `re.UNICODE` flag to avoid stripping non-english
# word characters. # word characters.
# TEMPORARY: Encode as ASCII to work around a bug: # FIXME: Encode as ASCII to work around a bug:
# https://github.com/beetbox/beets/issues/1051 # https://github.com/beetbox/beets/issues/1051
# When the library is fixed, we should encode as UTF-8. # When the library is fixed, we should encode as UTF-8.
query = re.sub(r'(?u)\W+', ' ', query).encode('ascii', "replace") query = re.sub(r'(?u)\W+', ' ', query).encode('ascii', "replace")
# Strip medium information from query, Things like "CD1" and "disk 1" # Strip medium information from query, Things like "CD1" and "disk 1"
# can also negate an otherwise positive result. # can also negate an otherwise positive result.
query = re.sub(r'(?i)\b(CD|disc)\s*\d+', '', query) query = re.sub(br'(?i)\b(CD|disc)\s*\d+', b'', query)
try: try:
releases = self.discogs_client.search(query, releases = self.discogs_client.search(query,
type='release').page(1) type='release').page(1)
@ -208,11 +213,48 @@ class DiscogsPlugin(BeetsPlugin):
self._log.debug(u"Communication error while searching for {0!r}", self._log.debug(u"Communication error while searching for {0!r}",
query, exc_info=True) query, exc_info=True)
return [] return []
return [self.get_album_info(release) for release in releases[:5]] return [album for album in map(self.get_album_info, releases[:5])
if album]
def get_master_year(self, master_id):
"""Fetches a master release given its Discogs ID and returns its year
or None if the master release is not found.
"""
self._log.debug(u'Searching for master release {0}', master_id)
result = Master(self.discogs_client, {'id': master_id})
try:
year = result.fetch('year')
return year
except DiscogsAPIError as e:
if e.status_code != 404:
self._log.debug(u'API Error: {0} (query: {1})', e, result._uri)
if e.status_code == 401:
self.reset_auth()
return self.get_master_year(master_id)
return None
except CONNECTION_ERRORS:
self._log.debug(u'Connection error in master release lookup',
exc_info=True)
return None
def get_album_info(self, result): def get_album_info(self, result):
"""Returns an AlbumInfo object for a discogs Release object. """Returns an AlbumInfo object for a discogs Release object.
""" """
# Explicitly reload the `Release` fields, as they might not be yet
# present if the result is from a `discogs_client.search()`.
if not result.data.get('artists'):
result.refresh()
# Sanity check for required fields. The list of required fields is
# defined at Guideline 1.3.1.a, but in practice some releases might be
# lacking some of these fields. This function expects at least:
# `artists` (>0), `title`, `id`, `tracklist` (>0)
# https://www.discogs.com/help/doc/submission-guidelines-general-rules
if not all([result.data.get(k) for k in ['artists', 'title', 'id',
'tracklist']]):
self._log.warn(u"Release does not contain the required fields")
return None
artist, artist_id = self.get_artist([a.data for a in result.artists]) artist, artist_id = self.get_artist([a.data for a in result.artists])
album = re.sub(r' +', ' ', result.title) album = re.sub(r' +', ' ', result.title)
album_id = result.data['id'] album_id = result.data['id']
@ -221,28 +263,53 @@ class DiscogsPlugin(BeetsPlugin):
# information and leave us with skeleton `Artist` objects that will # information and leave us with skeleton `Artist` objects that will
# each make an API call just to get the same data back. # each make an API call just to get the same data back.
tracks = self.get_tracks(result.data['tracklist']) tracks = self.get_tracks(result.data['tracklist'])
# Extract information for the optional AlbumInfo fields, if possible.
va = result.data['artists'][0].get('name', '').lower() == 'various'
year = result.data.get('year')
mediums = [t.medium for t in tracks]
country = result.data.get('country')
data_url = result.data.get('uri')
# Extract information for the optional AlbumInfo fields that are
# contained on nested discogs fields.
albumtype = media = label = catalogno = None
if result.data.get('formats'):
albumtype = ', '.join( albumtype = ', '.join(
result.data['formats'][0].get('descriptions', [])) or None result.data['formats'][0].get('descriptions', [])) or None
va = result.data['artists'][0]['name'].lower() == 'various' media = result.data['formats'][0]['name']
if result.data.get('labels'):
label = result.data['labels'][0].get('name')
catalogno = result.data['labels'][0].get('catno')
# Additional cleanups (various artists name, catalog number, media).
if va: if va:
artist = config['va_name'].get(unicode) artist = config['va_name'].as_str()
year = result.data['year']
label = result.data['labels'][0]['name']
mediums = len(set(t.medium for t in tracks))
catalogno = result.data['labels'][0]['catno']
if catalogno == 'none': if catalogno == 'none':
catalogno = None catalogno = None
country = result.data.get('country') # Explicitly set the `media` for the tracks, since it is expected by
media = result.data['formats'][0]['name'] # `autotag.apply_metadata`, and set `medium_total`.
data_url = result.data['uri'] for track in tracks:
track.media = media
track.medium_total = mediums.count(track.medium)
# Discogs does not have track IDs. Invent our own IDs as proposed
# in #2336.
track.track_id = str(album_id) + "-" + track.track_alt
# Retrieve master release id (returns None if there isn't one).
master_id = result.data.get('master_id')
# Assume `original_year` is equal to `year` for releases without
# a master release, otherwise fetch the master release.
original_year = self.get_master_year(master_id) if master_id else year
return AlbumInfo(album, album_id, artist, artist_id, tracks, asin=None, return AlbumInfo(album, album_id, artist, artist_id, tracks, asin=None,
albumtype=albumtype, va=va, year=year, month=None, albumtype=albumtype, va=va, year=year, month=None,
day=None, label=label, mediums=mediums, day=None, label=label, mediums=len(set(mediums)),
artist_sort=None, releasegroup_id=None, artist_sort=None, releasegroup_id=master_id,
catalognum=catalogno, script=None, language=None, catalognum=catalogno, script=None, language=None,
country=country, albumstatus=None, media=media, country=country, albumstatus=None, media=media,
albumdisambig=None, artist_credit=None, albumdisambig=None, artist_credit=None,
original_year=None, original_month=None, original_year=original_year, original_month=None,
original_day=None, data_source='Discogs', original_day=None, data_source='Discogs',
data_url=data_url) data_url=data_url)
@ -269,38 +336,71 @@ class DiscogsPlugin(BeetsPlugin):
def get_tracks(self, tracklist): def get_tracks(self, tracklist):
"""Returns a list of TrackInfo objects for a discogs tracklist. """Returns a list of TrackInfo objects for a discogs tracklist.
""" """
try:
clean_tracklist = self.coalesce_tracks(tracklist)
except Exception as exc:
# FIXME: this is an extra precaution for making sure there are no
# side effects after #2222. It should be removed after further
# testing.
self._log.debug(u'{}', traceback.format_exc())
self._log.error(u'uncaught exception in coalesce_tracks: {}', exc)
clean_tracklist = tracklist
tracks = [] tracks = []
index_tracks = {} index_tracks = {}
index = 0 index = 0
for track in tracklist: for track in clean_tracklist:
# Only real tracks have `position`. Otherwise, it's an index track. # Only real tracks have `position`. Otherwise, it's an index track.
if track['position']: if track['position']:
index += 1 index += 1
tracks.append(self.get_track_info(track, index)) track_info = self.get_track_info(track, index)
track_info.track_alt = track['position']
tracks.append(track_info)
else: else:
index_tracks[index + 1] = track['title'] index_tracks[index + 1] = track['title']
# Fix up medium and medium_index for each track. Discogs position is # Fix up medium and medium_index for each track. Discogs position is
# unreliable, but tracks are in order. # unreliable, but tracks are in order.
medium = None medium = None
medium_count, index_count = 0, 0 medium_count, index_count, side_count = 0, 0, 0
sides_per_medium = 1
# If a medium has two sides (ie. vinyl or cassette), each pair of
# consecutive sides should belong to the same medium.
if all([track.medium is not None for track in tracks]):
m = sorted(set([track.medium.lower() for track in tracks]))
# If all track.medium are single consecutive letters, assume it is
# a 2-sided medium.
if ''.join(m) in ascii_lowercase:
sides_per_medium = 2
for track in tracks: for track in tracks:
# Handle special case where a different medium does not indicate a # Handle special case where a different medium does not indicate a
# new disc, when there is no medium_index and the ordinal of medium # new disc, when there is no medium_index and the ordinal of medium
# is not sequential. For example, I, II, III, IV, V. Assume these # is not sequential. For example, I, II, III, IV, V. Assume these
# are the track index, not the medium. # are the track index, not the medium.
# side_count is the number of mediums or medium sides (in the case
# of two-sided mediums) that were seen before.
medium_is_index = track.medium and not track.medium_index and ( medium_is_index = track.medium and not track.medium_index and (
len(track.medium) != 1 or len(track.medium) != 1 or
ord(track.medium) - 64 != medium_count + 1 # Not within standard incremental medium values (A, B, C, ...).
ord(track.medium) - 64 != side_count + 1
) )
if not medium_is_index and medium != track.medium: if not medium_is_index and medium != track.medium:
# Increment medium_count and reset index_count when medium side_count += 1
# changes. if sides_per_medium == 2:
medium = track.medium if side_count % sides_per_medium:
# Two-sided medium changed. Reset index_count.
index_count = 0
medium_count += 1
else:
# Medium changed. Reset index_count.
medium_count += 1 medium_count += 1
index_count = 0 index_count = 0
medium = track.medium
index_count += 1 index_count += 1
medium_count = 1 if medium_count == 0 else medium_count
track.medium, track.medium_index = medium_count, index_count track.medium, track.medium_index = medium_count, index_count
# Get `disctitle` from Discogs index tracks. Assume that an index track # Get `disctitle` from Discogs index tracks. Assume that an index track
@ -315,30 +415,122 @@ class DiscogsPlugin(BeetsPlugin):
return tracks return tracks
def coalesce_tracks(self, raw_tracklist):
"""Pre-process a tracklist, merging subtracks into a single track. The
title for the merged track is the one from the previous index track,
if present; otherwise it is a combination of the subtracks titles.
"""
def add_merged_subtracks(tracklist, subtracks):
"""Modify `tracklist` in place, merging a list of `subtracks` into
a single track into `tracklist`."""
# Calculate position based on first subtrack, without subindex.
idx, medium_idx, sub_idx = \
self.get_track_index(subtracks[0]['position'])
position = '%s%s' % (idx or '', medium_idx or '')
if tracklist and not tracklist[-1]['position']:
# Assume the previous index track contains the track title.
if sub_idx:
# "Convert" the track title to a real track, discarding the
# subtracks assuming they are logical divisions of a
# physical track (12.2.9 Subtracks).
tracklist[-1]['position'] = position
else:
# Promote the subtracks to real tracks, discarding the
# index track, assuming the subtracks are physical tracks.
index_track = tracklist.pop()
# Fix artists when they are specified on the index track.
if index_track.get('artists'):
for subtrack in subtracks:
if not subtrack.get('artists'):
subtrack['artists'] = index_track['artists']
tracklist.extend(subtracks)
else:
# Merge the subtracks, pick a title, and append the new track.
track = subtracks[0].copy()
track['title'] = ' / '.join([t['title'] for t in subtracks])
tracklist.append(track)
# Pre-process the tracklist, trying to identify subtracks.
subtracks = []
tracklist = []
prev_subindex = ''
for track in raw_tracklist:
# Regular subtrack (track with subindex).
if track['position']:
_, _, subindex = self.get_track_index(track['position'])
if subindex:
if subindex.rjust(len(raw_tracklist)) > prev_subindex:
# Subtrack still part of the current main track.
subtracks.append(track)
else:
# Subtrack part of a new group (..., 1.3, *2.1*, ...).
add_merged_subtracks(tracklist, subtracks)
subtracks = [track]
prev_subindex = subindex.rjust(len(raw_tracklist))
continue
# Index track with nested sub_tracks.
if not track['position'] and 'sub_tracks' in track:
# Append the index track, assuming it contains the track title.
tracklist.append(track)
add_merged_subtracks(tracklist, track['sub_tracks'])
continue
# Regular track or index track without nested sub_tracks.
if subtracks:
add_merged_subtracks(tracklist, subtracks)
subtracks = []
prev_subindex = ''
tracklist.append(track)
# Merge and add the remaining subtracks, if any.
if subtracks:
add_merged_subtracks(tracklist, subtracks)
return tracklist
def get_track_info(self, track, index): def get_track_info(self, track, index):
"""Returns a TrackInfo object for a discogs track. """Returns a TrackInfo object for a discogs track.
""" """
title = track['title'] title = track['title']
track_id = None track_id = None
medium, medium_index = self.get_track_index(track['position']) medium, medium_index, _ = self.get_track_index(track['position'])
artist, artist_id = self.get_artist(track.get('artists', [])) artist, artist_id = self.get_artist(track.get('artists', []))
length = self.get_track_length(track['duration']) length = self.get_track_length(track['duration'])
return TrackInfo(title, track_id, artist, artist_id, length, index, return TrackInfo(title, track_id, artist=artist, artist_id=artist_id,
medium, medium_index, artist_sort=None, length=length, index=index,
disctitle=None, artist_credit=None) medium=medium, medium_index=medium_index,
artist_sort=None, disctitle=None, artist_credit=None)
def get_track_index(self, position): def get_track_index(self, position):
"""Returns the medium and medium index for a discogs track position. """Returns the medium, medium index and subtrack index for a discogs
""" track position."""
# medium_index is a number at the end of position. medium is everything # Match the standard Discogs positions (12.2.9), which can have several
# else. E.g. (A)(1), (Side A, Track )(1), (A)(), ()(1), etc. # forms (1, 1-1, A1, A1.1, A1a, ...).
match = re.match(r'^(.*?)(\d*)$', position.upper()) match = re.match(
r'^(.*?)' # medium: everything before medium_index.
r'(\d*?)' # medium_index: a number at the end of
# `position`, except if followed by a subtrack
# index.
# subtrack_index: can only be matched if medium
# or medium_index have been matched, and can be
r'((?<=\w)\.[\w]+' # - a dot followed by a string (A.1, 2.A)
r'|(?<=\d)[A-Z]+' # - a string that follows a number (1A, B2a)
r')?'
r'$',
position.upper()
)
if match: if match:
medium, index = match.groups() medium, index, subindex = match.groups()
if subindex and subindex.startswith('.'):
subindex = subindex[1:]
else: else:
self._log.debug(u'Invalid position: {0}', position) self._log.debug(u'Invalid position: {0}', position)
medium = index = None medium = index = subindex = None
return medium or None, index or None return medium or None, index or None, subindex or None
def get_track_length(self, duration): def get_track_length(self, duration):
"""Returns the track length in seconds for a discogs duration. """Returns the track length in seconds for a discogs duration.

View file

@ -20,9 +20,11 @@ from __future__ import division, absolute_import, print_function
import shlex import shlex
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
from beets.ui import decargs, print_, vararg_callback, Subcommand, UserError from beets.ui import decargs, print_, Subcommand, UserError
from beets.util import command_output, displayable_path, subprocess from beets.util import command_output, displayable_path, subprocess, \
bytestring_path, MoveOperation
from beets.library import Item, Album from beets.library import Item, Album
import six
PLUGIN = 'duplicates' PLUGIN = 'duplicates'
@ -79,10 +81,9 @@ class DuplicatesPlugin(BeetsPlugin):
help=u'report duplicates only if all attributes are set', help=u'report duplicates only if all attributes are set',
) )
self._command.parser.add_option( self._command.parser.add_option(
u'-k', u'--keys', dest='keys', u'-k', u'--key', dest='keys',
action='callback', metavar='KEY1 KEY2', action='append', metavar='KEY',
callback=vararg_callback, help=u'report duplicates based on keys (use multiple times)',
help=u'report duplicates based on keys',
) )
self._command.parser.add_option( self._command.parser.add_option(
u'-M', u'--merge', dest='merge', u'-M', u'--merge', dest='merge',
@ -112,14 +113,14 @@ class DuplicatesPlugin(BeetsPlugin):
self.config.set_args(opts) self.config.set_args(opts)
album = self.config['album'].get(bool) album = self.config['album'].get(bool)
checksum = self.config['checksum'].get(str) checksum = self.config['checksum'].get(str)
copy = self.config['copy'].get(str) copy = bytestring_path(self.config['copy'].as_str())
count = self.config['count'].get(bool) count = self.config['count'].get(bool)
delete = self.config['delete'].get(bool) delete = self.config['delete'].get(bool)
fmt = self.config['format'].get(str) fmt = self.config['format'].get(str)
full = self.config['full'].get(bool) full = self.config['full'].get(bool)
keys = self.config['keys'].get(list) keys = self.config['keys'].as_str_seq()
merge = self.config['merge'].get(bool) merge = self.config['merge'].get(bool)
move = self.config['move'].get(str) move = bytestring_path(self.config['move'].as_str())
path = self.config['path'].get(bool) path = self.config['path'].get(bool)
tiebreak = self.config['tiebreak'].get(dict) tiebreak = self.config['tiebreak'].get(dict)
strict = self.config['strict'].get(bool) strict = self.config['strict'].get(bool)
@ -135,15 +136,15 @@ class DuplicatesPlugin(BeetsPlugin):
items = lib.items(decargs(args)) items = lib.items(decargs(args))
if path: if path:
fmt = '$path' fmt = u'$path'
# Default format string for count mode. # Default format string for count mode.
if count and not fmt: if count and not fmt:
if album: if album:
fmt = '$albumartist - $album' fmt = u'$albumartist - $album'
else: else:
fmt = '$albumartist - $album - $title' fmt = u'$albumartist - $album - $title'
fmt += ': {0}' fmt += u': {0}'
if checksum: if checksum:
for i in items: for i in items:
@ -169,22 +170,22 @@ class DuplicatesPlugin(BeetsPlugin):
return [self._command] return [self._command]
def _process_item(self, item, copy=False, move=False, delete=False, def _process_item(self, item, copy=False, move=False, delete=False,
tag=False, fmt=''): tag=False, fmt=u''):
"""Process Item `item`. """Process Item `item`.
""" """
print_(format(item, fmt)) print_(format(item, fmt))
if copy: if copy:
item.move(basedir=copy, copy=True) item.move(basedir=copy, operation=MoveOperation.COPY)
item.store() item.store()
if move: if move:
item.move(basedir=move, copy=False) item.move(basedir=move)
item.store() item.store()
if delete: if delete:
item.remove(delete=True) item.remove(delete=True)
if tag: if tag:
try: try:
k, v = tag.split('=') k, v = tag.split('=')
except: except Exception:
raise UserError( raise UserError(
u"{}: can't parse k=v tag: {}".format(PLUGIN, tag) u"{}: can't parse k=v tag: {}".format(PLUGIN, tag)
) )
@ -252,20 +253,19 @@ class DuplicatesPlugin(BeetsPlugin):
"completeness" (objects with more non-null fields come first) "completeness" (objects with more non-null fields come first)
and Albums are ordered by their track count. and Albums are ordered by their track count.
""" """
if tiebreak: kind = 'items' if all(isinstance(o, Item) for o in objs) else 'albums'
kind = 'items' if all(isinstance(o, Item)
for o in objs) else 'albums' if tiebreak and kind in tiebreak.keys():
key = lambda x: tuple(getattr(x, k) for k in tiebreak[kind]) key = lambda x: tuple(getattr(x, k) for k in tiebreak[kind])
else: else:
kind = Item if all(isinstance(o, Item) for o in objs) else Album if kind == 'items':
if kind is Item:
def truthy(v): def truthy(v):
# Avoid a Unicode warning by avoiding comparison # Avoid a Unicode warning by avoiding comparison
# between a bytes object and the empty Unicode # between a bytes object and the empty Unicode
# string ''. # string ''.
return v is not None and \ return v is not None and \
(v != '' if isinstance(v, unicode) else True) (v != '' if isinstance(v, six.text_type) else True)
fields = kind.all_keys() fields = Item.all_keys()
key = lambda x: sum(1 for f in fields if truthy(getattr(x, f))) key = lambda x: sum(1 for f in fields if truthy(getattr(x, f)))
else: else:
key = lambda x: len(x.items()) key = lambda x: len(x.items())
@ -311,7 +311,7 @@ class DuplicatesPlugin(BeetsPlugin):
objs[0], objs[0],
displayable_path(o.path), displayable_path(o.path),
displayable_path(missing.destination())) displayable_path(missing.destination()))
missing.move(copy=True) missing.move(operation=MoveOperation.COPY)
return objs return objs
def _merge(self, objs): def _merge(self, objs):
@ -329,7 +329,7 @@ class DuplicatesPlugin(BeetsPlugin):
"""Generate triples of keys, duplicate counts, and constituent objects. """Generate triples of keys, duplicate counts, and constituent objects.
""" """
offset = 0 if full else 1 offset = 0 if full else 1
for k, objs in self._group_by(objs, keys, strict).iteritems(): for k, objs in self._group_by(objs, keys, strict).items():
if len(objs) > 1: if len(objs) > 1:
objs = self._order(objs, tiebreak) objs = self._order(objs, tiebreak)
if merge: if merge:

View file

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2016 # Copyright 2016
# #
@ -22,11 +23,12 @@ from beets import ui
from beets.dbcore import types from beets.dbcore import types
from beets.importer import action from beets.importer import action
from beets.ui.commands import _do_query, PromptChoice from beets.ui.commands import _do_query, PromptChoice
from copy import deepcopy import codecs
import subprocess import subprocess
import yaml import yaml
from tempfile import NamedTemporaryFile from tempfile import NamedTemporaryFile
import os import os
import six
# These "safe" types can avoid the format/parse cycle that most fields go # These "safe" types can avoid the format/parse cycle that most fields go
@ -82,7 +84,7 @@ def load(s):
# Convert all keys to strings. They started out as strings, # Convert all keys to strings. They started out as strings,
# but the user may have inadvertently messed this up. # but the user may have inadvertently messed this up.
out.append({unicode(k): v for k, v in d.items()}) out.append({six.text_type(k): v for k, v in d.items()})
except yaml.YAMLError as e: except yaml.YAMLError as e:
raise ParseError(u'invalid YAML: {}'.format(e)) raise ParseError(u'invalid YAML: {}'.format(e))
@ -141,7 +143,7 @@ def apply_(obj, data):
else: else:
# Either the field was stringified originally or the user changed # Either the field was stringified originally or the user changed
# it from a safe type to an unsafe one. Parse it as a string. # it from a safe type to an unsafe one. Parse it as a string.
obj.set_parse(key, unicode(value)) obj.set_parse(key, six.text_type(value))
class EditPlugin(plugins.BeetsPlugin): class EditPlugin(plugins.BeetsPlugin):
@ -242,9 +244,15 @@ class EditPlugin(plugins.BeetsPlugin):
old_data = [flatten(o, fields) for o in objs] old_data = [flatten(o, fields) for o in objs]
# Set up a temporary file with the initial data for editing. # Set up a temporary file with the initial data for editing.
new = NamedTemporaryFile(suffix='.yaml', delete=False) if six.PY2:
new = NamedTemporaryFile(mode='w', suffix='.yaml', delete=False)
else:
new = NamedTemporaryFile(mode='w', suffix='.yaml', delete=False,
encoding='utf-8')
old_str = dump(old_data) old_str = dump(old_data)
new.write(old_str) new.write(old_str)
if six.PY2:
old_str = old_str.decode('utf-8')
new.close() new.close()
# Loop until we have parseable data and the user confirms. # Loop until we have parseable data and the user confirms.
@ -255,7 +263,7 @@ class EditPlugin(plugins.BeetsPlugin):
# Read the data back after editing and check whether anything # Read the data back after editing and check whether anything
# changed. # changed.
with open(new.name) as f: with codecs.open(new.name, encoding='utf-8') as f:
new_str = f.read() new_str = f.read()
if new_str == old_str: if new_str == old_str:
ui.print_(u"No changes; aborting.") ui.print_(u"No changes; aborting.")
@ -274,7 +282,7 @@ class EditPlugin(plugins.BeetsPlugin):
# Show the changes. # Show the changes.
# If the objects are not on the DB yet, we need a copy of their # If the objects are not on the DB yet, we need a copy of their
# original state for show_model_changes. # original state for show_model_changes.
objs_old = [deepcopy(obj) if not obj._db else None objs_old = [obj.copy() if obj.id < 0 else None
for obj in objs] for obj in objs]
self.apply_data(objs, old_data, new_data) self.apply_data(objs, old_data, new_data)
changed = False changed = False
@ -293,9 +301,13 @@ class EditPlugin(plugins.BeetsPlugin):
elif choice == u'c': # Cancel. elif choice == u'c': # Cancel.
return False return False
elif choice == u'e': # Keep editing. elif choice == u'e': # Keep editing.
# Reset the temporary changes to the objects. # Reset the temporary changes to the objects. I we have a
# copy from above, use that, else reload from the database.
objs = [(old_obj or obj)
for old_obj, obj in zip(objs_old, objs)]
for obj in objs: for obj in objs:
obj.read() if not obj.id < 0:
obj.load()
continue continue
# Remove the temporary file before returning. # Remove the temporary file before returning.
@ -310,7 +322,7 @@ class EditPlugin(plugins.BeetsPlugin):
are temporary. are temporary.
""" """
if len(old_data) != len(new_data): if len(old_data) != len(new_data):
self._log.warn(u'number of objects changed from {} to {}', self._log.warning(u'number of objects changed from {} to {}',
len(old_data), len(new_data)) len(old_data), len(new_data))
obj_by_id = {o.id: o for o in objs} obj_by_id = {o.id: o for o in objs}
@ -321,7 +333,7 @@ class EditPlugin(plugins.BeetsPlugin):
forbidden = False forbidden = False
for key in ignore_fields: for key in ignore_fields:
if old_dict.get(key) != new_dict.get(key): if old_dict.get(key) != new_dict.get(key):
self._log.warn(u'ignoring object whose {} changed', key) self._log.warning(u'ignoring object whose {} changed', key)
forbidden = True forbidden = True
break break
if forbidden: if forbidden:
@ -356,9 +368,13 @@ class EditPlugin(plugins.BeetsPlugin):
"""Callback for invoking the functionality during an interactive """Callback for invoking the functionality during an interactive
import session on the *original* item tags. import session on the *original* item tags.
""" """
# Assign temporary ids to the Items. # Assign negative temporary ids to Items that are not in the database
for i, obj in enumerate(task.items): # yet. By using negative values, no clash with items in the database
obj.id = i + 1 # can occur.
for i, obj in enumerate(task.items, start=1):
# The importer may set the id to None when re-importing albums.
if not obj._db or obj.id is None:
obj.id = -i
# Present the YAML to the user and let her change it. # Present the YAML to the user and let her change it.
fields = self._get_fields(album=False, extra=[]) fields = self._get_fields(album=False, extra=[])
@ -366,6 +382,7 @@ class EditPlugin(plugins.BeetsPlugin):
# Remove temporary ids. # Remove temporary ids.
for obj in task.items: for obj in task.items:
if obj.id < 0:
obj.id = None obj.id = None
# Save the new data. # Save the new data.

View file

@ -20,13 +20,35 @@ import os.path
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
from beets import ui from beets import ui
from beets.ui import decargs from beets.ui import print_, decargs
from beets.util import syspath, normpath, displayable_path, bytestring_path from beets.util import syspath, normpath, displayable_path, bytestring_path
from beets.util.artresizer import ArtResizer from beets.util.artresizer import ArtResizer
from beets import config from beets import config
from beets import art from beets import art
def _confirm(objs, album):
"""Show the list of affected objects (items or albums) and confirm
that the user wants to modify their artwork.
`album` is a Boolean indicating whether these are albums (as opposed
to items).
"""
noun = u'album' if album else u'file'
prompt = u'Modify artwork for {} {}{} (Y/n)?'.format(
len(objs),
noun,
u's' if len(objs) > 1 else u''
)
# Show all the items or albums.
for obj in objs:
print_(format(obj))
# Confirm with user.
return ui.input_yn(prompt)
class EmbedCoverArtPlugin(BeetsPlugin): class EmbedCoverArtPlugin(BeetsPlugin):
"""Allows albumart to be embedded into the actual files. """Allows albumart to be embedded into the actual files.
""" """
@ -60,6 +82,9 @@ class EmbedCoverArtPlugin(BeetsPlugin):
embed_cmd.parser.add_option( embed_cmd.parser.add_option(
u'-f', u'--file', metavar='PATH', help=u'the image file to embed' u'-f', u'--file', metavar='PATH', help=u'the image file to embed'
) )
embed_cmd.parser.add_option(
u"-y", u"--yes", action="store_true", help=u"skip confirmation"
)
maxwidth = self.config['maxwidth'].get(int) maxwidth = self.config['maxwidth'].get(int)
compare_threshold = self.config['compare_threshold'].get(int) compare_threshold = self.config['compare_threshold'].get(int)
ifempty = self.config['ifempty'].get(bool) ifempty = self.config['ifempty'].get(bool)
@ -71,11 +96,24 @@ class EmbedCoverArtPlugin(BeetsPlugin):
raise ui.UserError(u'image file {0} not found'.format( raise ui.UserError(u'image file {0} not found'.format(
displayable_path(imagepath) displayable_path(imagepath)
)) ))
for item in lib.items(decargs(args)):
items = lib.items(decargs(args))
# Confirm with user.
if not opts.yes and not _confirm(items, not opts.file):
return
for item in items:
art.embed_item(self._log, item, imagepath, maxwidth, None, art.embed_item(self._log, item, imagepath, maxwidth, None,
compare_threshold, ifempty) compare_threshold, ifempty)
else: else:
for album in lib.albums(decargs(args)): albums = lib.albums(decargs(args))
# Confirm with user.
if not opts.yes and not _confirm(albums, not opts.file):
return
for album in albums:
art.embed_album(self._log, album, maxwidth, False, art.embed_album(self._log, album, maxwidth, False,
compare_threshold, ifempty) compare_threshold, ifempty)
self.remove_artfile(album) self.remove_artfile(album)
@ -107,7 +145,7 @@ class EmbedCoverArtPlugin(BeetsPlugin):
else: else:
filename = bytestring_path(opts.filename or filename = bytestring_path(opts.filename or
config['art_filename'].get()) config['art_filename'].get())
if os.path.dirname(filename) != '': if os.path.dirname(filename) != b'':
self._log.error( self._log.error(
u"Only specify a name rather than a path for -n") u"Only specify a name rather than a path for -n")
return return
@ -125,8 +163,15 @@ class EmbedCoverArtPlugin(BeetsPlugin):
'clearart', 'clearart',
help=u'remove images from file metadata', help=u'remove images from file metadata',
) )
clear_cmd.parser.add_option(
u"-y", u"--yes", action="store_true", help=u"skip confirmation"
)
def clear_func(lib, opts, args): def clear_func(lib, opts, args):
items = lib.items(decargs(args))
# Confirm with user.
if not opts.yes and not _confirm(items, False):
return
art.clear(self._log, lib, decargs(args)) art.clear(self._log, lib, decargs(args))
clear_cmd.func = clear_func clear_cmd.func = clear_func

View file

@ -6,22 +6,51 @@
host: localhost host: localhost
port: 8096 port: 8096
username: user username: user
apikey: apikey
password: password password: password
""" """
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
from beets import config
from beets.plugins import BeetsPlugin
from urllib import urlencode
from urlparse import urljoin, parse_qs, urlsplit, urlunsplit
import hashlib import hashlib
import requests import requests
from six.moves.urllib.parse import urlencode
from six.moves.urllib.parse import urljoin, parse_qs, urlsplit, urlunsplit
from beets import config
from beets.plugins import BeetsPlugin
def api_url(host, port, endpoint): def api_url(host, port, endpoint):
"""Returns a joined url. """Returns a joined url.
Takes host, port and endpoint and generates a valid emby API url.
:param host: Hostname of the emby server
:param port: Portnumber of the emby server
:param endpoint: API endpoint
:type host: str
:type port: int
:type endpoint: str
:returns: Full API url
:rtype: str
""" """
joined = urljoin('http://{0}:{1}'.format(host, port), endpoint) # check if http or https is defined as host and create hostname
hostname_list = [host]
if host.startswith('http://') or host.startswith('https://'):
hostname = ''.join(hostname_list)
else:
hostname_list.insert(0, 'http://')
hostname = ''.join(hostname_list)
joined = urljoin(
'{hostname}:{port}'.format(
hostname=hostname,
port=port
),
endpoint
)
scheme, netloc, path, query_string, fragment = urlsplit(joined) scheme, netloc, path, query_string, fragment = urlsplit(joined)
query_params = parse_qs(query_string) query_params = parse_qs(query_string)
@ -33,34 +62,62 @@ def api_url(host, port, endpoint):
def password_data(username, password): def password_data(username, password):
"""Returns a dict with username and its encoded password. """Returns a dict with username and its encoded password.
:param username: Emby username
:param password: Emby password
:type username: str
:type password: str
:returns: Dictionary with username and encoded password
:rtype: dict
""" """
return { return {
'username': username, 'username': username,
'password': hashlib.sha1(password).hexdigest(), 'password': hashlib.sha1(password.encode('utf-8')).hexdigest(),
'passwordMd5': hashlib.md5(password).hexdigest() 'passwordMd5': hashlib.md5(password.encode('utf-8')).hexdigest()
} }
def create_headers(user_id, token=None): def create_headers(user_id, token=None):
"""Return header dict that is needed to talk to the Emby API. """Return header dict that is needed to talk to the Emby API.
:param user_id: Emby user ID
:param token: Authentication token for Emby
:type user_id: str
:type token: str
:returns: Headers for requests
:rtype: dict
""" """
headers = { headers = {}
'Authorization': 'MediaBrowser',
'UserId': user_id, authorization = (
'Client': 'other', 'MediaBrowser UserId="{user_id}", '
'Device': 'empy', 'Client="other", '
'DeviceId': 'beets', 'Device="beets", '
'Version': '0.0.0' 'DeviceId="beets", '
} 'Version="0.0.0"'
).format(user_id=user_id)
headers['x-emby-authorization'] = authorization
if token: if token:
headers['X-MediaBrowser-Token'] = token headers['x-mediabrowser-token'] = token
return headers return headers
def get_token(host, port, headers, auth_data): def get_token(host, port, headers, auth_data):
"""Return token for a user. """Return token for a user.
:param host: Emby host
:param port: Emby port
:param headers: Headers for requests
:param auth_data: Username and encoded password for authentication
:type host: str
:type port: int
:type headers: dict
:type auth_data: dict
:returns: Access Token
:rtype: str
""" """
url = api_url(host, port, '/Users/AuthenticateByName') url = api_url(host, port, '/Users/AuthenticateByName')
r = requests.post(url, headers=headers, data=auth_data) r = requests.post(url, headers=headers, data=auth_data)
@ -70,6 +127,15 @@ def get_token(host, port, headers, auth_data):
def get_user(host, port, username): def get_user(host, port, username):
"""Return user dict from server or None if there is no user. """Return user dict from server or None if there is no user.
:param host: Emby host
:param port: Emby port
:username: Username
:type host: str
:type port: int
:type username: str
:returns: Matched Users
:rtype: list
""" """
url = api_url(host, port, '/Users/Public') url = api_url(host, port, '/Users/Public')
r = requests.get(url) r = requests.get(url)
@ -84,8 +150,10 @@ class EmbyUpdate(BeetsPlugin):
# Adding defaults. # Adding defaults.
config['emby'].add({ config['emby'].add({
u'host': u'localhost', u'host': u'http://localhost',
u'port': 8096 u'port': 8096,
u'apikey': None,
u'password': None,
}) })
self.register_listener('database_change', self.listen_for_db_change) self.register_listener('database_change', self.listen_for_db_change)
@ -104,6 +172,12 @@ class EmbyUpdate(BeetsPlugin):
port = config['emby']['port'].get() port = config['emby']['port'].get()
username = config['emby']['username'].get() username = config['emby']['username'].get()
password = config['emby']['password'].get() password = config['emby']['password'].get()
token = config['emby']['apikey'].get()
# Check if at least a apikey or password is given.
if not any([password, token]):
self._log.warning(u'Provide at least Emby password or apikey.')
return
# Get user information from the Emby API. # Get user information from the Emby API.
user = get_user(host, port, username) user = get_user(host, port, username)
@ -111,6 +185,7 @@ class EmbyUpdate(BeetsPlugin):
self._log.warning(u'User {0} could not be found.'.format(username)) self._log.warning(u'User {0} could not be found.'.format(username))
return return
if not token:
# Create Authentication data and headers. # Create Authentication data and headers.
auth_data = password_data(username, password) auth_data = password_data(username, password)
headers = create_headers(user[0]['Id']) headers = create_headers(user[0]['Id'])

View file

@ -29,8 +29,11 @@ from beets import importer
from beets import ui from beets import ui
from beets import util from beets import util
from beets import config from beets import config
from beets.mediafile import image_mime_type
from beets.util.artresizer import ArtResizer from beets.util.artresizer import ArtResizer
from beets.util import confit from beets.util import confit
from beets.util import syspath, bytestring_path, py3_path
import six
try: try:
import itunes import itunes
@ -38,9 +41,11 @@ try:
except ImportError: except ImportError:
HAVE_ITUNES = False HAVE_ITUNES = False
IMAGE_EXTENSIONS = ['png', 'jpg', 'jpeg'] CONTENT_TYPES = {
CONTENT_TYPES = ('image/jpeg', 'image/png') 'image/jpeg': [b'jpg', b'jpeg'],
DOWNLOAD_EXTENSION = '.jpg' 'image/png': [b'png']
}
IMAGE_EXTENSIONS = [ext for exts in CONTENT_TYPES.values() for ext in exts]
class Candidate(object): class Candidate(object):
@ -64,7 +69,7 @@ class Candidate(object):
self.match = match self.match = match
self.size = size self.size = size
def _validate(self, extra): def _validate(self, plugin):
"""Determine whether the candidate artwork is valid based on """Determine whether the candidate artwork is valid based on
its dimensions (width and ratio). its dimensions (width and ratio).
@ -75,9 +80,7 @@ class Candidate(object):
if not self.path: if not self.path:
return self.CANDIDATE_BAD return self.CANDIDATE_BAD
if not (extra['enforce_ratio'] or if not (plugin.enforce_ratio or plugin.minwidth or plugin.maxwidth):
extra['minwidth'] or
extra['maxwidth']):
return self.CANDIDATE_EXACT return self.CANDIDATE_EXACT
# get_size returns None if no local imaging backend is available # get_size returns None if no local imaging backend is available
@ -96,22 +99,22 @@ class Candidate(object):
long_edge = max(self.size) long_edge = max(self.size)
# Check minimum size. # Check minimum size.
if extra['minwidth'] and self.size[0] < extra['minwidth']: if plugin.minwidth and self.size[0] < plugin.minwidth:
self._log.debug(u'image too small ({} < {})', self._log.debug(u'image too small ({} < {})',
self.size[0], extra['minwidth']) self.size[0], plugin.minwidth)
return self.CANDIDATE_BAD return self.CANDIDATE_BAD
# Check aspect ratio. # Check aspect ratio.
edge_diff = long_edge - short_edge edge_diff = long_edge - short_edge
if extra['enforce_ratio']: if plugin.enforce_ratio:
if extra['margin_px']: if plugin.margin_px:
if edge_diff > extra['margin_px']: if edge_diff > plugin.margin_px:
self._log.debug(u'image is not close enough to being ' self._log.debug(u'image is not close enough to being '
u'square, ({} - {} > {})', u'square, ({} - {} > {})',
long_edge, short_edge, extra['margin_px']) long_edge, short_edge, plugin.margin_px)
return self.CANDIDATE_BAD return self.CANDIDATE_BAD
elif extra['margin_percent']: elif plugin.margin_percent:
margin_px = extra['margin_percent'] * long_edge margin_px = plugin.margin_percent * long_edge
if edge_diff > margin_px: if edge_diff > margin_px:
self._log.debug(u'image is not close enough to being ' self._log.debug(u'image is not close enough to being '
u'square, ({} - {} > {})', u'square, ({} - {} > {})',
@ -124,20 +127,20 @@ class Candidate(object):
return self.CANDIDATE_BAD return self.CANDIDATE_BAD
# Check maximum size. # Check maximum size.
if extra['maxwidth'] and self.size[0] > extra['maxwidth']: if plugin.maxwidth and self.size[0] > plugin.maxwidth:
self._log.debug(u'image needs resizing ({} > {})', self._log.debug(u'image needs resizing ({} > {})',
self.size[0], extra['maxwidth']) self.size[0], plugin.maxwidth)
return self.CANDIDATE_DOWNSCALE return self.CANDIDATE_DOWNSCALE
return self.CANDIDATE_EXACT return self.CANDIDATE_EXACT
def validate(self, extra): def validate(self, plugin):
self.check = self._validate(extra) self.check = self._validate(plugin)
return self.check return self.check
def resize(self, extra): def resize(self, plugin):
if extra['maxwidth'] and self.check == self.CANDIDATE_DOWNSCALE: if plugin.maxwidth and self.check == self.CANDIDATE_DOWNSCALE:
self.path = ArtResizer.shared.resize(extra['maxwidth'], self.path) self.path = ArtResizer.shared.resize(plugin.maxwidth, self.path)
def _logged_get(log, *args, **kwargs): def _logged_get(log, *args, **kwargs):
@ -189,17 +192,20 @@ class RequestMixin(object):
# ART SOURCES ################################################################ # ART SOURCES ################################################################
class ArtSource(RequestMixin): class ArtSource(RequestMixin):
def __init__(self, log, config): VALID_MATCHING_CRITERIA = ['default']
def __init__(self, log, config, match_by=None):
self._log = log self._log = log
self._config = config self._config = config
self.match_by = match_by or self.VALID_MATCHING_CRITERIA
def get(self, album, extra): def get(self, album, plugin, paths):
raise NotImplementedError() raise NotImplementedError()
def _candidate(self, **kwargs): def _candidate(self, **kwargs):
return Candidate(source=self, log=self._log, **kwargs) return Candidate(source=self, log=self._log, **kwargs)
def fetch_image(self, candidate, extra): def fetch_image(self, candidate, plugin):
raise NotImplementedError() raise NotImplementedError()
@ -207,7 +213,7 @@ class LocalArtSource(ArtSource):
IS_LOCAL = True IS_LOCAL = True
LOC_STR = u'local' LOC_STR = u'local'
def fetch_image(self, candidate, extra): def fetch_image(self, candidate, plugin):
pass pass
@ -215,58 +221,94 @@ class RemoteArtSource(ArtSource):
IS_LOCAL = False IS_LOCAL = False
LOC_STR = u'remote' LOC_STR = u'remote'
def fetch_image(self, candidate, extra): def fetch_image(self, candidate, plugin):
"""Downloads an image from a URL and checks whether it seems to """Downloads an image from a URL and checks whether it seems to
actually be an image. If so, returns a path to the downloaded image. actually be an image. If so, returns a path to the downloaded image.
Otherwise, returns None. Otherwise, returns None.
""" """
if extra['maxwidth']: if plugin.maxwidth:
candidate.url = ArtResizer.shared.proxy_url(extra['maxwidth'], candidate.url = ArtResizer.shared.proxy_url(plugin.maxwidth,
candidate.url) candidate.url)
try: try:
with closing(self.request(candidate.url, stream=True, with closing(self.request(candidate.url, stream=True,
message=u'downloading image')) as resp: message=u'downloading image')) as resp:
if 'Content-Type' not in resp.headers \ ct = resp.headers.get('Content-Type', None)
or resp.headers['Content-Type'] not in CONTENT_TYPES:
self._log.debug( # Download the image to a temporary file. As some servers
u'not a supported image: {}', # (notably fanart.tv) have proven to return wrong Content-Types
resp.headers.get('Content-Type') or u'no content type', # when images were uploaded with a bad file extension, do not
) # rely on it. Instead validate the type using the file magic
candidate.path = None # and only then determine the extension.
data = resp.iter_content(chunk_size=1024)
header = b''
for chunk in data:
header += chunk
if len(header) >= 32:
# The imghdr module will only read 32 bytes, and our
# own additions in mediafile even less.
break
else:
# server didn't return enough data, i.e. corrupt image
return return
# Generate a temporary file with the correct extension. real_ct = image_mime_type(header)
with NamedTemporaryFile(suffix=DOWNLOAD_EXTENSION, if real_ct is None:
delete=False) as fh: # detection by file magic failed, fall back to the
for chunk in resp.iter_content(chunk_size=1024): # server-supplied Content-Type
# Is our type detection failsafe enough to drop this?
real_ct = ct
if real_ct not in CONTENT_TYPES:
self._log.debug(u'not a supported image: {}',
real_ct or u'unknown content type')
return
ext = b'.' + CONTENT_TYPES[real_ct][0]
if real_ct != ct:
self._log.warning(u'Server specified {}, but returned a '
u'{} image. Correcting the extension '
u'to {}',
ct, real_ct, ext)
suffix = py3_path(ext)
with NamedTemporaryFile(suffix=suffix, delete=False) as fh:
# write the first already loaded part of the image
fh.write(header)
# download the remaining part of the image
for chunk in data:
fh.write(chunk) fh.write(chunk)
self._log.debug(u'downloaded art to: {0}', self._log.debug(u'downloaded art to: {0}',
util.displayable_path(fh.name)) util.displayable_path(fh.name))
candidate.path = fh.name candidate.path = util.bytestring_path(fh.name)
return return
except (IOError, requests.RequestException, TypeError) as exc: except (IOError, requests.RequestException, TypeError) as exc:
# Handling TypeError works around a urllib3 bug: # Handling TypeError works around a urllib3 bug:
# https://github.com/shazow/urllib3/issues/556 # https://github.com/shazow/urllib3/issues/556
self._log.debug(u'error fetching art: {}', exc) self._log.debug(u'error fetching art: {}', exc)
candidate.path = None
return return
class CoverArtArchive(RemoteArtSource): class CoverArtArchive(RemoteArtSource):
NAME = u"Cover Art Archive" NAME = u"Cover Art Archive"
VALID_MATCHING_CRITERIA = ['release', 'releasegroup']
if util.SNI_SUPPORTED:
URL = 'https://coverartarchive.org/release/{mbid}/front'
GROUP_URL = 'https://coverartarchive.org/release-group/{mbid}/front'
else:
URL = 'http://coverartarchive.org/release/{mbid}/front' URL = 'http://coverartarchive.org/release/{mbid}/front'
GROUP_URL = 'http://coverartarchive.org/release-group/{mbid}/front' GROUP_URL = 'http://coverartarchive.org/release-group/{mbid}/front'
def get(self, album, extra): def get(self, album, plugin, paths):
"""Return the Cover Art Archive and Cover Art Archive release group URLs """Return the Cover Art Archive and Cover Art Archive release group URLs
using album MusicBrainz release ID and release group ID. using album MusicBrainz release ID and release group ID.
""" """
if album.mb_albumid: if 'release' in self.match_by and album.mb_albumid:
yield self._candidate(url=self.URL.format(mbid=album.mb_albumid), yield self._candidate(url=self.URL.format(mbid=album.mb_albumid),
match=Candidate.MATCH_EXACT) match=Candidate.MATCH_EXACT)
if album.mb_releasegroupid: if 'releasegroup' in self.match_by and album.mb_releasegroupid:
yield self._candidate( yield self._candidate(
url=self.GROUP_URL.format(mbid=album.mb_releasegroupid), url=self.GROUP_URL.format(mbid=album.mb_releasegroupid),
match=Candidate.MATCH_FALLBACK) match=Candidate.MATCH_FALLBACK)
@ -277,7 +319,7 @@ class Amazon(RemoteArtSource):
URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg' URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg'
INDICES = (1, 2) INDICES = (1, 2)
def get(self, album, extra): def get(self, album, plugin, paths):
"""Generate URLs using Amazon ID (ASIN) string. """Generate URLs using Amazon ID (ASIN) string.
""" """
if album.asin: if album.asin:
@ -291,7 +333,7 @@ class AlbumArtOrg(RemoteArtSource):
URL = 'http://www.albumart.org/index_detail.php' URL = 'http://www.albumart.org/index_detail.php'
PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"' PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"'
def get(self, album, extra): def get(self, album, plugin, paths):
"""Return art URL from AlbumArt.org using album ASIN. """Return art URL from AlbumArt.org using album ASIN.
""" """
if not album.asin: if not album.asin:
@ -322,7 +364,7 @@ class GoogleImages(RemoteArtSource):
self.key = self._config['google_key'].get(), self.key = self._config['google_key'].get(),
self.cx = self._config['google_engine'].get(), self.cx = self._config['google_engine'].get(),
def get(self, album, extra): def get(self, album, plugin, paths):
"""Return art URL from google custom search engine """Return art URL from google custom search engine
given an album title and interpreter. given an album title and interpreter.
""" """
@ -358,8 +400,7 @@ class GoogleImages(RemoteArtSource):
class FanartTV(RemoteArtSource): class FanartTV(RemoteArtSource):
"""Art from fanart.tv requested using their API""" """Art from fanart.tv requested using their API"""
NAME = u"fanart.tv" NAME = u"fanart.tv"
API_URL = 'https://webservice.fanart.tv/v3/'
API_URL = 'http://webservice.fanart.tv/v3/'
API_ALBUMS = API_URL + 'music/albums/' API_ALBUMS = API_URL + 'music/albums/'
PROJECT_KEY = '61a7d0ab4e67162b7a0c7c35915cd48e' PROJECT_KEY = '61a7d0ab4e67162b7a0c7c35915cd48e'
@ -367,7 +408,7 @@ class FanartTV(RemoteArtSource):
super(FanartTV, self).__init__(*args, **kwargs) super(FanartTV, self).__init__(*args, **kwargs)
self.client_key = self._config['fanarttv_key'].get() self.client_key = self._config['fanarttv_key'].get()
def get(self, album, extra): def get(self, album, plugin, paths):
if not album.mb_releasegroupid: if not album.mb_releasegroupid:
return return
@ -418,7 +459,7 @@ class FanartTV(RemoteArtSource):
class ITunesStore(RemoteArtSource): class ITunesStore(RemoteArtSource):
NAME = u"iTunes Store" NAME = u"iTunes Store"
def get(self, album, extra): def get(self, album, plugin, paths):
"""Return art URL from iTunes Store given an album title. """Return art URL from iTunes Store given an album title.
""" """
if not (album.albumartist and album.album): if not (album.albumartist and album.album):
@ -452,8 +493,8 @@ class ITunesStore(RemoteArtSource):
class Wikipedia(RemoteArtSource): class Wikipedia(RemoteArtSource):
NAME = u"Wikipedia (queried through DBpedia)" NAME = u"Wikipedia (queried through DBpedia)"
DBPEDIA_URL = 'http://dbpedia.org/sparql' DBPEDIA_URL = 'https://dbpedia.org/sparql'
WIKIPEDIA_URL = 'http://en.wikipedia.org/w/api.php' WIKIPEDIA_URL = 'https://en.wikipedia.org/w/api.php'
SPARQL_QUERY = u'''PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> SPARQL_QUERY = u'''PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dbpprop: <http://dbpedia.org/property/> PREFIX dbpprop: <http://dbpedia.org/property/>
PREFIX owl: <http://dbpedia.org/ontology/> PREFIX owl: <http://dbpedia.org/ontology/>
@ -476,7 +517,7 @@ class Wikipedia(RemoteArtSource):
}} }}
Limit 1''' Limit 1'''
def get(self, album, extra): def get(self, album, plugin, paths):
if not (album.albumartist and album.album): if not (album.albumartist and album.album):
return return
@ -566,7 +607,7 @@ class Wikipedia(RemoteArtSource):
try: try:
data = wikipedia_response.json() data = wikipedia_response.json()
results = data['query']['pages'] results = data['query']['pages']
for _, result in results.iteritems(): for _, result in results.items():
image_url = result['imageinfo'][0]['url'] image_url = result['imageinfo'][0]['url']
yield self._candidate(url=image_url, yield self._candidate(url=image_url,
match=Candidate.MATCH_EXACT) match=Candidate.MATCH_EXACT)
@ -588,26 +629,26 @@ class FileSystem(LocalArtSource):
""" """
return [idx for (idx, x) in enumerate(cover_names) if x in filename] return [idx for (idx, x) in enumerate(cover_names) if x in filename]
def get(self, album, extra): def get(self, album, plugin, paths):
"""Look for album art files in the specified directories. """Look for album art files in the specified directories.
""" """
paths = extra['paths']
if not paths: if not paths:
return return
cover_names = extra['cover_names'] cover_names = list(map(util.bytestring_path, plugin.cover_names))
cover_pat = br"(\b|_)({0})(\b|_)".format(b'|'.join(cover_names)) cover_names_str = b'|'.join(cover_names)
cautious = extra['cautious'] cover_pat = br''.join([br"(\b|_)(", cover_names_str, br")(\b|_)"])
for path in paths: for path in paths:
if not os.path.isdir(path): if not os.path.isdir(syspath(path)):
continue continue
# Find all files that look like images in the directory. # Find all files that look like images in the directory.
images = [] images = []
for fn in os.listdir(path): for fn in os.listdir(syspath(path)):
fn = bytestring_path(fn)
for ext in IMAGE_EXTENSIONS: for ext in IMAGE_EXTENSIONS:
if fn.lower().endswith(b'.' + ext.encode('utf8')) and \ if fn.lower().endswith(b'.' + ext) and \
os.path.isfile(os.path.join(path, fn)): os.path.isfile(syspath(os.path.join(path, fn))):
images.append(fn) images.append(fn)
# Look for "preferred" filenames. # Look for "preferred" filenames.
@ -625,7 +666,7 @@ class FileSystem(LocalArtSource):
remaining.append(fn) remaining.append(fn)
# Fall back to any image in the folder. # Fall back to any image in the folder.
if remaining and not cautious: if remaining and not plugin.cautious:
self._log.debug(u'using fallback art file {0}', self._log.debug(u'using fallback art file {0}',
util.displayable_path(remaining[0])) util.displayable_path(remaining[0]))
yield self._candidate(path=os.path.join(path, remaining[0]), yield self._candidate(path=os.path.join(path, remaining[0]),
@ -691,7 +732,7 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
confit.String(pattern=self.PAT_PERCENT)])) confit.String(pattern=self.PAT_PERCENT)]))
self.margin_px = None self.margin_px = None
self.margin_percent = None self.margin_percent = None
if type(self.enforce_ratio) is unicode: if type(self.enforce_ratio) is six.text_type:
if self.enforce_ratio[-1] == u'%': if self.enforce_ratio[-1] == u'%':
self.margin_percent = float(self.enforce_ratio[:-1]) / 100 self.margin_percent = float(self.enforce_ratio[:-1]) / 100
elif self.enforce_ratio[-2:] == u'px': elif self.enforce_ratio[-2:] == u'px':
@ -702,7 +743,7 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
self.enforce_ratio = True self.enforce_ratio = True
cover_names = self.config['cover_names'].as_str_seq() cover_names = self.config['cover_names'].as_str_seq()
self.cover_names = map(util.bytestring_path, cover_names) self.cover_names = list(map(util.bytestring_path, cover_names))
self.cautious = self.config['cautious'].get(bool) self.cautious = self.config['cautious'].get(bool)
self.store_source = self.config['store_source'].get(bool) self.store_source = self.config['store_source'].get(bool)
@ -720,20 +761,30 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
if not self.config['google_key'].get() and \ if not self.config['google_key'].get() and \
u'google' in available_sources: u'google' in available_sources:
available_sources.remove(u'google') available_sources.remove(u'google')
sources_name = plugins.sanitize_choices( available_sources = [(s, c)
self.config['sources'].as_str_seq(), available_sources) for s in available_sources
for c in ART_SOURCES[s].VALID_MATCHING_CRITERIA]
sources = plugins.sanitize_pairs(
self.config['sources'].as_pairs(default_value='*'),
available_sources)
if 'remote_priority' in self.config: if 'remote_priority' in self.config:
self._log.warning( self._log.warning(
u'The `fetch_art.remote_priority` configuration option has ' u'The `fetch_art.remote_priority` configuration option has '
u'been deprecated, see the documentation.') u'been deprecated. Instead, place `filesystem` at the end of '
u'your `sources` list.')
if self.config['remote_priority'].get(bool): if self.config['remote_priority'].get(bool):
try: fs = []
sources_name.remove(u'filesystem') others = []
sources_name.append(u'filesystem') for s, c in sources:
except ValueError: if s == 'filesystem':
pass fs.append((s, c))
self.sources = [ART_SOURCES[s](self._log, self.config) else:
for s in sources_name] others.append((s, c))
sources = others + fs
self.sources = [ART_SOURCES[s](self._log, self.config, match_by=[c])
for s, c in sources]
# Asynchronous; after music is added to the library. # Asynchronous; after music is added to the library.
def fetch_art(self, session, task): def fetch_art(self, session, task):
@ -745,7 +796,8 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
if task.choice_flag == importer.action.ASIS: if task.choice_flag == importer.action.ASIS:
# For as-is imports, don't search Web sources for art. # For as-is imports, don't search Web sources for art.
local = True local = True
elif task.choice_flag == importer.action.APPLY: elif task.choice_flag in (importer.action.APPLY,
importer.action.RETAG):
# Search everywhere for art. # Search everywhere for art.
local = False local = False
else: else:
@ -786,9 +838,15 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
action='store_true', default=False, action='store_true', default=False,
help=u're-download art when already present' help=u're-download art when already present'
) )
cmd.parser.add_option(
u'-q', u'--quiet', dest='quiet',
action='store_true', default=False,
help=u'shows only quiet art'
)
def func(lib, opts, args): def func(lib, opts, args):
self.batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force) self.batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force,
opts.quiet)
cmd.func = func cmd.func = func
return [cmd] return [cmd]
@ -803,16 +861,6 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
""" """
out = None out = None
# all the information any of the sources might need
extra = {'paths': paths,
'cover_names': self.cover_names,
'cautious': self.cautious,
'enforce_ratio': self.enforce_ratio,
'margin_px': self.margin_px,
'margin_percent': self.margin_percent,
'minwidth': self.minwidth,
'maxwidth': self.maxwidth}
for source in self.sources: for source in self.sources:
if source.IS_LOCAL or not local_only: if source.IS_LOCAL or not local_only:
self._log.debug( self._log.debug(
@ -822,9 +870,9 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
) )
# URLs might be invalid at this point, or the image may not # URLs might be invalid at this point, or the image may not
# fulfill the requirements # fulfill the requirements
for candidate in source.get(album, extra): for candidate in source.get(album, self, paths):
source.fetch_image(candidate, extra) source.fetch_image(candidate, self)
if candidate.validate(extra): if candidate.validate(self):
out = candidate out = candidate
self._log.debug( self._log.debug(
u'using {0.LOC_STR} image {1}'.format( u'using {0.LOC_STR} image {1}'.format(
@ -834,17 +882,20 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
break break
if out: if out:
out.resize(extra) out.resize(self)
return out return out
def batch_fetch_art(self, lib, albums, force): def batch_fetch_art(self, lib, albums, force, quiet):
"""Fetch album art for each of the albums. This implements the manual """Fetch album art for each of the albums. This implements the manual
fetchart CLI command. fetchart CLI command.
""" """
for album in albums: for album in albums:
if album.artpath and not force and os.path.isfile(album.artpath): if album.artpath and not force and os.path.isfile(album.artpath):
message = ui.colorize('text_highlight_minor', u'has album art') if not quiet:
message = ui.colorize('text_highlight_minor',
u'has album art')
self._log.info(u'{0}: {1}', album, message)
else: else:
# In ordinary invocations, look for images on the # In ordinary invocations, look for images on the
# filesystem. When forcing, however, always go to the Web # filesystem. When forcing, however, always go to the Web
@ -857,5 +908,4 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
message = ui.colorize('text_success', u'found album art') message = ui.colorize('text_success', u'found album art')
else: else:
message = ui.colorize('text_error', u'no art found') message = ui.colorize('text_error', u'no art found')
self._log.info(u'{0}: {1}', album, message) self._log.info(u'{0}: {1}', album, message)

View file

@ -20,6 +20,7 @@ from __future__ import division, absolute_import, print_function
import re import re
from beets import config from beets import config
from beets.util import bytestring_path
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
from beets.importer import SingletonImportTask from beets.importer import SingletonImportTask
@ -35,14 +36,15 @@ class FileFilterPlugin(BeetsPlugin):
self.path_album_regex = \ self.path_album_regex = \
self.path_singleton_regex = \ self.path_singleton_regex = \
re.compile(self.config['path'].get()) re.compile(bytestring_path(self.config['path'].get()))
if 'album_path' in self.config: if 'album_path' in self.config:
self.path_album_regex = re.compile(self.config['album_path'].get()) self.path_album_regex = re.compile(
bytestring_path(self.config['album_path'].get()))
if 'singleton_path' in self.config: if 'singleton_path' in self.config:
self.path_singleton_regex = re.compile( self.path_singleton_regex = re.compile(
self.config['singleton_path'].get()) bytestring_path(self.config['singleton_path'].get()))
def import_task_created_event(self, session, task): def import_task_created_event(self, session, task):
if task.items and len(task.items) > 0: if task.items and len(task.items) > 0:
@ -69,6 +71,7 @@ class FileFilterPlugin(BeetsPlugin):
of the file given in full_path. of the file given in full_path.
""" """
import_config = dict(config['import']) import_config = dict(config['import'])
full_path = bytestring_path(full_path)
if 'singletons' not in import_config or not import_config[ if 'singletons' not in import_config or not import_config[
'singletons']: 'singletons']:
# Album # Album

View file

@ -22,34 +22,26 @@ from beets import plugins
from beets.util import displayable_path from beets.util import displayable_path
import os import os
import re import re
import six
# Filename field extraction patterns. # Filename field extraction patterns.
PATTERNS = [ PATTERNS = [
# "01 - Track 01" and "01": do nothing
r'^(\d+)\s*-\s*track\s*\d$',
r'^\d+$',
# Useful patterns. # Useful patterns.
r'^(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$', r'^(?P<artist>.+)[\-_](?P<title>.+)[\-_](?P<tag>.*)$',
r'^(?P<track>\d+)\s*-(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$', r'^(?P<track>\d+)[\s.\-_]+(?P<artist>.+)[\-_](?P<title>.+)[\-_](?P<tag>.*)$',
r'^(?P<track>\d+)\s(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$', r'^(?P<artist>.+)[\-_](?P<title>.+)$',
r'^(?P<artist>.+)-(?P<title>.+)$', r'^(?P<track>\d+)[\s.\-_]+(?P<artist>.+)[\-_](?P<title>.+)$',
r'^(?P<track>\d+)\.\s*(?P<artist>.+)-(?P<title>.+)$',
r'^(?P<track>\d+)\s*-\s*(?P<artist>.+)-(?P<title>.+)$',
r'^(?P<track>\d+)\s*-(?P<artist>.+)-(?P<title>.+)$',
r'^(?P<track>\d+)\s(?P<artist>.+)-(?P<title>.+)$',
r'^(?P<title>.+)$', r'^(?P<title>.+)$',
r'^(?P<track>\d+)\.\s*(?P<title>.+)$', r'^(?P<track>\d+)[\s.\-_]+(?P<title>.+)$',
r'^(?P<track>\d+)\s*-\s*(?P<title>.+)$', r'^(?P<track>\d+)\s+(?P<title>.+)$',
r'^(?P<track>\d+)\s(?P<title>.+)$',
r'^(?P<title>.+) by (?P<artist>.+)$', r'^(?P<title>.+) by (?P<artist>.+)$',
r'^(?P<track>\d+).*$',
] ]
# Titles considered "empty" and in need of replacement. # Titles considered "empty" and in need of replacement.
BAD_TITLE_PATTERNS = [ BAD_TITLE_PATTERNS = [
r'^$', r'^$',
r'\d+?\s?-?\s*track\s*\d+',
] ]
@ -100,7 +92,7 @@ def apply_matches(d):
"""Given a mapping from items to field dicts, apply the fields to """Given a mapping from items to field dicts, apply the fields to
the objects. the objects.
""" """
some_map = d.values()[0] some_map = list(d.values())[0]
keys = some_map.keys() keys = some_map.keys()
# Only proceed if the "tag" field is equal across all filenames. # Only proceed if the "tag" field is equal across all filenames.
@ -132,7 +124,7 @@ def apply_matches(d):
# Apply the title and track. # Apply the title and track.
for item in d: for item in d:
if bad_title(item.title): if bad_title(item.title):
item.title = unicode(d[item][title_field]) item.title = six.text_type(d[item][title_field])
if 'track' in d[item] and item.track == 0: if 'track' in d[item] and item.track == 0:
item.track = int(d[item]['track']) item.track = int(d[item]['track'])

View file

@ -49,29 +49,28 @@ def find_feat_part(artist, albumartist):
"""Attempt to find featured artists in the item's artist fields and """Attempt to find featured artists in the item's artist fields and
return the results. Returns None if no featured artist found. return the results. Returns None if no featured artist found.
""" """
feat_part = None
# Look for the album artist in the artist field. If it's not # Look for the album artist in the artist field. If it's not
# present, give up. # present, give up.
albumartist_split = artist.split(albumartist, 1) albumartist_split = artist.split(albumartist, 1)
if len(albumartist_split) <= 1: if len(albumartist_split) <= 1:
return feat_part return None
# If the last element of the split (the right-hand side of the # If the last element of the split (the right-hand side of the
# album artist) is nonempty, then it probably contains the # album artist) is nonempty, then it probably contains the
# featured artist. # featured artist.
elif albumartist_split[-1] != '': elif albumartist_split[1] != '':
# Extract the featured artist from the right-hand side. # Extract the featured artist from the right-hand side.
_, feat_part = split_on_feat(albumartist_split[-1]) _, feat_part = split_on_feat(albumartist_split[1])
return feat_part
# Otherwise, if there's nothing on the right-hand side, look for a # Otherwise, if there's nothing on the right-hand side, look for a
# featuring artist on the left-hand side. # featuring artist on the left-hand side.
else: else:
lhs, rhs = split_on_feat(albumartist_split[0]) lhs, rhs = split_on_feat(albumartist_split[0])
if lhs: if lhs:
feat_part = lhs return lhs
return feat_part return None
class FtInTitlePlugin(plugins.BeetsPlugin): class FtInTitlePlugin(plugins.BeetsPlugin):
@ -90,7 +89,7 @@ class FtInTitlePlugin(plugins.BeetsPlugin):
self._command.parser.add_option( self._command.parser.add_option(
u'-d', u'--drop', dest='drop', u'-d', u'--drop', dest='drop',
action='store_true', default=False, action='store_true', default=None,
help=u'drop featuring from artists and ignore title update') help=u'drop featuring from artists and ignore title update')
if self.config['auto']: if self.config['auto']:
@ -137,7 +136,7 @@ class FtInTitlePlugin(plugins.BeetsPlugin):
# Only update the title if it does not already contain a featured # Only update the title if it does not already contain a featured
# artist and if we do not drop featuring information. # artist and if we do not drop featuring information.
if not drop_feat and not contains_feat(item.title): if not drop_feat and not contains_feat(item.title):
feat_format = self.config['format'].get(unicode) feat_format = self.config['format'].as_str()
new_format = feat_format.format(feat_part) new_format = feat_format.format(feat_part)
new_title = u"{0} {1}".format(item.title, new_format) new_title = u"{0} {1}".format(item.title, new_format)
self._log.info(u'title: {0} -> {1}', item.title, new_title) self._log.info(u'title: {0} -> {1}', item.title, new_title)

View file

@ -44,5 +44,5 @@ class FuzzyPlugin(BeetsPlugin):
}) })
def queries(self): def queries(self):
prefix = self.config['prefix'].get(basestring) prefix = self.config['prefix'].as_str()
return {prefix: FuzzyQuery} return {prefix: FuzzyQuery}

96
libs/beetsplug/gmusic.py Normal file
View file

@ -0,0 +1,96 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2017, Tigran Kostandyan.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Upload files to Google Play Music and list songs in its library."""
from __future__ import absolute_import, division, print_function
import os.path
from beets.plugins import BeetsPlugin
from beets import ui
from beets import config
from beets.ui import Subcommand
from gmusicapi import Musicmanager, Mobileclient
from gmusicapi.exceptions import NotLoggedIn
import gmusicapi.clients
class Gmusic(BeetsPlugin):
def __init__(self):
super(Gmusic, self).__init__()
# Checks for OAuth2 credentials,
# if they don't exist - performs authorization
self.m = Musicmanager()
if os.path.isfile(gmusicapi.clients.OAUTH_FILEPATH):
self.m.login()
else:
self.m.perform_oauth()
def commands(self):
gupload = Subcommand('gmusic-upload',
help=u'upload your tracks to Google Play Music')
gupload.func = self.upload
search = Subcommand('gmusic-songs',
help=u'list of songs in Google Play Music library'
)
search.parser.add_option('-t', '--track', dest='track',
action='store_true',
help='Search by track name')
search.parser.add_option('-a', '--artist', dest='artist',
action='store_true',
help='Search by artist')
search.func = self.search
return [gupload, search]
def upload(self, lib, opts, args):
items = lib.items(ui.decargs(args))
files = [x.path.decode('utf-8') for x in items]
ui.print_(u'Uploading your files...')
self.m.upload(filepaths=files)
ui.print_(u'Your files were successfully added to library')
def search(self, lib, opts, args):
password = config['gmusic']['password']
email = config['gmusic']['email']
password.redact = True
email.redact = True
# Since Musicmanager doesn't support library management
# we need to use mobileclient interface
mobile = Mobileclient()
try:
mobile.login(email.as_str(), password.as_str(),
Mobileclient.FROM_MAC_ADDRESS)
files = mobile.get_all_songs()
except NotLoggedIn:
ui.print_(
u'Authentication error. Please check your email and password.'
)
return
if not args:
for i, file in enumerate(files, start=1):
print(i, ui.colorize('blue', file['artist']),
file['title'], ui.colorize('red', file['album']))
else:
if opts.track:
self.match(files, args, 'title')
else:
self.match(files, args, 'artist')
@staticmethod
def match(files, args, search_by):
for file in files:
if ' '.join(ui.decargs(args)) in file[search_by]:
print(file['artist'], file['title'], file['album'])

View file

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2015, Adrian Sampson. # Copyright 2015, Adrian Sampson.
# #
@ -17,15 +18,20 @@ from __future__ import division, absolute_import, print_function
import string import string
import subprocess import subprocess
import six
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
from beets.ui import _arg_encoding from beets.util import shlex_split, arg_encoding
from beets.util import shlex_split
class CodingFormatter(string.Formatter): class CodingFormatter(string.Formatter):
"""A custom string formatter that decodes the format string and it's """A variant of `string.Formatter` that converts everything to `unicode`
fields. strings.
This is necessary on Python 2, where formatting otherwise occurs on
bytestrings. It intercepts two points in the formatting process to decode
the format string and all fields using the specified encoding. If decoding
fails, the values are used as-is.
""" """
def __init__(self, coding): def __init__(self, coding):
@ -57,10 +63,9 @@ class CodingFormatter(string.Formatter):
""" """
converted = super(CodingFormatter, self).convert_field(value, converted = super(CodingFormatter, self).convert_field(value,
conversion) conversion)
try:
converted = converted.decode(self._coding) if isinstance(converted, bytes):
except UnicodeEncodeError: return converted.decode(self._coding)
pass
return converted return converted
@ -79,8 +84,8 @@ class HookPlugin(BeetsPlugin):
for hook_index in range(len(hooks)): for hook_index in range(len(hooks)):
hook = self.config['hooks'][hook_index] hook = self.config['hooks'][hook_index]
hook_event = hook['event'].get(unicode) hook_event = hook['event'].as_str()
hook_command = hook['command'].get(unicode) hook_command = hook['command'].as_str()
self.create_and_register_hook(hook_event, hook_command) self.create_and_register_hook(hook_event, hook_command)
@ -90,7 +95,12 @@ class HookPlugin(BeetsPlugin):
self._log.error('invalid command "{0}"', command) self._log.error('invalid command "{0}"', command)
return return
formatter = CodingFormatter(_arg_encoding()) # Use a string formatter that works on Unicode strings.
if six.PY2:
formatter = CodingFormatter(arg_encoding())
else:
formatter = string.Formatter()
command_pieces = shlex_split(command) command_pieces = shlex_split(command)
for i, piece in enumerate(command_pieces): for i, piece in enumerate(command_pieces):

View file

@ -30,12 +30,13 @@ class ImportAddedPlugin(BeetsPlugin):
self.item_mtime = dict() self.item_mtime = dict()
register = self.register_listener register = self.register_listener
register('import_task_start', self.check_config) register('import_task_created', self.check_config)
register('import_task_start', self.record_if_inplace) register('import_task_created', self.record_if_inplace)
register('import_task_files', self.record_reimported) register('import_task_files', self.record_reimported)
register('before_item_moved', self.record_import_mtime) register('before_item_moved', self.record_import_mtime)
register('item_copied', self.record_import_mtime) register('item_copied', self.record_import_mtime)
register('item_linked', self.record_import_mtime) register('item_linked', self.record_import_mtime)
register('item_hardlinked', self.record_import_mtime)
register('album_imported', self.update_album_times) register('album_imported', self.update_album_times)
register('item_imported', self.update_item_times) register('item_imported', self.update_item_times)
register('after_write', self.update_after_write_time) register('after_write', self.update_after_write_time)
@ -51,7 +52,7 @@ class ImportAddedPlugin(BeetsPlugin):
def record_if_inplace(self, task, session): def record_if_inplace(self, task, session):
if not (session.config['copy'] or session.config['move'] or if not (session.config['copy'] or session.config['move'] or
session.config['link']): session.config['link'] or session.config['hardlink']):
self._log.debug(u"In place import detected, recording mtimes from " self._log.debug(u"In place import detected, recording mtimes from "
u"source paths") u"source paths")
items = [task.item] \ items = [task.item] \
@ -62,7 +63,7 @@ class ImportAddedPlugin(BeetsPlugin):
def record_reimported(self, task, session): def record_reimported(self, task, session):
self.reimported_item_ids = set(item.id for item, replaced_items self.reimported_item_ids = set(item.id for item, replaced_items
in task.replaced_items.iteritems() in task.replaced_items.items()
if replaced_items) if replaced_items)
self.replaced_album_paths = set(task.replaced_albums.keys()) self.replaced_album_paths = set(task.replaced_albums.keys())

View file

@ -24,26 +24,12 @@ import os
import re import re
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
from beets.util import mkdirall, normpath, syspath, bytestring_path from beets.util import mkdirall, normpath, syspath, bytestring_path, link
from beets import config from beets import config
M3U_DEFAULT_NAME = 'imported.m3u' M3U_DEFAULT_NAME = 'imported.m3u'
def _get_feeds_dir(lib):
"""Given a Library object, return the path to the feeds directory to be
used (either in the library directory or an explicitly configured
path). Ensures that the directory exists.
"""
# Inside library directory.
dirpath = lib.directory
# Ensure directory exists.
if not os.path.exists(syspath(dirpath)):
os.makedirs(syspath(dirpath))
return dirpath
def _build_m3u_filename(basename): def _build_m3u_filename(basename):
"""Builds unique m3u filename by appending given basename to current """Builds unique m3u filename by appending given basename to current
date.""" date."""
@ -61,7 +47,7 @@ def _write_m3u(m3u_path, items_paths):
"""Append relative paths to items into m3u file. """Append relative paths to items into m3u file.
""" """
mkdirall(m3u_path) mkdirall(m3u_path)
with open(syspath(m3u_path), 'a') as f: with open(syspath(m3u_path), 'ab') as f:
for path in items_paths: for path in items_paths:
f.write(path + b'\n') f.write(path + b'\n')
@ -78,30 +64,28 @@ class ImportFeedsPlugin(BeetsPlugin):
'absolute_path': False, 'absolute_path': False,
}) })
feeds_dir = self.config['dir'].get()
if feeds_dir:
feeds_dir = os.path.expanduser(bytestring_path(feeds_dir))
self.config['dir'] = feeds_dir
if not os.path.exists(syspath(feeds_dir)):
os.makedirs(syspath(feeds_dir))
relative_to = self.config['relative_to'].get() relative_to = self.config['relative_to'].get()
if relative_to: if relative_to:
self.config['relative_to'] = normpath(relative_to) self.config['relative_to'] = normpath(relative_to)
else: else:
self.config['relative_to'] = feeds_dir self.config['relative_to'] = self.get_feeds_dir()
self.register_listener('library_opened', self.library_opened)
self.register_listener('album_imported', self.album_imported) self.register_listener('album_imported', self.album_imported)
self.register_listener('item_imported', self.item_imported) self.register_listener('item_imported', self.item_imported)
def get_feeds_dir(self):
feeds_dir = self.config['dir'].get()
if feeds_dir:
return os.path.expanduser(bytestring_path(feeds_dir))
return config['directory'].as_filename()
def _record_items(self, lib, basename, items): def _record_items(self, lib, basename, items):
"""Records relative paths to the given items for each feed format """Records relative paths to the given items for each feed format
""" """
feedsdir = bytestring_path(self.config['dir'].as_filename()) feedsdir = bytestring_path(self.get_feeds_dir())
formats = self.config['formats'].as_str_seq() formats = self.config['formats'].as_str_seq()
relative_to = self.config['relative_to'].get() \ relative_to = self.config['relative_to'].get() \
or self.config['dir'].as_filename() or self.get_feeds_dir()
relative_to = bytestring_path(relative_to) relative_to = bytestring_path(relative_to)
paths = [] paths = []
@ -119,7 +103,7 @@ class ImportFeedsPlugin(BeetsPlugin):
if 'm3u' in formats: if 'm3u' in formats:
m3u_basename = bytestring_path( m3u_basename = bytestring_path(
self.config['m3u_name'].get(unicode)) self.config['m3u_name'].as_str())
m3u_path = os.path.join(feedsdir, m3u_basename) m3u_path = os.path.join(feedsdir, m3u_basename)
_write_m3u(m3u_path, paths) _write_m3u(m3u_path, paths)
@ -131,17 +115,13 @@ class ImportFeedsPlugin(BeetsPlugin):
for path in paths: for path in paths:
dest = os.path.join(feedsdir, os.path.basename(path)) dest = os.path.join(feedsdir, os.path.basename(path))
if not os.path.exists(syspath(dest)): if not os.path.exists(syspath(dest)):
os.symlink(syspath(path), syspath(dest)) link(path, dest)
if 'echo' in formats: if 'echo' in formats:
self._log.info(u"Location of imported music:") self._log.info(u"Location of imported music:")
for path in paths: for path in paths:
self._log.info(u" {0}", path) self._log.info(u" {0}", path)
def library_opened(self, lib):
if self.config['dir'].get() is None:
self.config['dir'] = _get_feeds_dir(lib)
def album_imported(self, lib, album): def album_imported(self, lib, album):
self._record_items(lib, album.album, album.items()) self._record_items(lib, album.album, album.items())

View file

@ -73,7 +73,7 @@ def library_data_emitter(item):
def update_summary(summary, tags): def update_summary(summary, tags):
for key, value in tags.iteritems(): for key, value in tags.items():
if key not in summary: if key not in summary:
summary[key] = value summary[key] = value
elif summary[key] != value: elif summary[key] != value:
@ -96,7 +96,7 @@ def print_data(data, item=None, fmt=None):
path = displayable_path(item.path) if item else None path = displayable_path(item.path) if item else None
formatted = {} formatted = {}
for key, value in data.iteritems(): for key, value in data.items():
if isinstance(value, list): if isinstance(value, list):
formatted[key] = u'; '.join(value) formatted[key] = u'; '.join(value)
if value is not None: if value is not None:
@ -123,7 +123,7 @@ def print_data_keys(data, item=None):
""" """
path = displayable_path(item.path) if item else None path = displayable_path(item.path) if item else None
formatted = [] formatted = []
for key, value in data.iteritems(): for key, value in data.items():
formatted.append(key) formatted.append(key)
if len(formatted) == 0: if len(formatted) == 0:
@ -204,7 +204,8 @@ class InfoPlugin(BeetsPlugin):
if opts.keys_only: if opts.keys_only:
print_data_keys(data, item) print_data_keys(data, item)
else: else:
print_data(data, item, opts.format) fmt = ui.decargs([opts.format])[0] if opts.format else None
print_data(data, item, fmt)
first = False first = False
if opts.summarize: if opts.summarize:
@ -230,7 +231,7 @@ def make_key_filter(include):
def filter_(data): def filter_(data):
filtered = dict() filtered = dict()
for key, value in data.items(): for key, value in data.items():
if any(map(lambda m: m.match(key), matchers)): if any([m.match(key) for m in matchers]):
filtered[key] = value filtered[key] = value
return filtered return filtered

View file

@ -22,6 +22,7 @@ import itertools
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
from beets import config from beets import config
import six
FUNC_NAME = u'__INLINE_FUNC__' FUNC_NAME = u'__INLINE_FUNC__'
@ -32,7 +33,7 @@ class InlineError(Exception):
def __init__(self, code, exc): def __init__(self, code, exc):
super(InlineError, self).__init__( super(InlineError, self).__init__(
(u"error in inline path field code:\n" (u"error in inline path field code:\n"
u"%s\n%s: %s") % (code, type(exc).__name__, unicode(exc)) u"%s\n%s: %s") % (code, type(exc).__name__, six.text_type(exc))
) )
@ -64,14 +65,14 @@ class InlinePlugin(BeetsPlugin):
for key, view in itertools.chain(config['item_fields'].items(), for key, view in itertools.chain(config['item_fields'].items(),
config['pathfields'].items()): config['pathfields'].items()):
self._log.debug(u'adding item field {0}', key) self._log.debug(u'adding item field {0}', key)
func = self.compile_inline(view.get(unicode), False) func = self.compile_inline(view.as_str(), False)
if func is not None: if func is not None:
self.template_fields[key] = func self.template_fields[key] = func
# Album fields. # Album fields.
for key, view in config['album_fields'].items(): for key, view in config['album_fields'].items():
self._log.debug(u'adding album field {0}', key) self._log.debug(u'adding album field {0}', key)
func = self.compile_inline(view.get(unicode), True) func = self.compile_inline(view.as_str(), True)
if func is not None: if func is not None:
self.album_template_fields[key] = func self.album_template_fields[key] = func

View file

@ -272,9 +272,11 @@ class IPFSPlugin(BeetsPlugin):
break break
except AttributeError: except AttributeError:
pass pass
item_path = os.path.basename(item.path).decode(
util._fsencoding(), 'ignore'
)
# Clear current path from item # Clear current path from item
item.path = '/ipfs/{0}/{1}'.format(album.ipfs, item.path = '/ipfs/{0}/{1}'.format(album.ipfs, item_path)
os.path.basename(item.path))
item.id = None item.id = None
items.append(item) items.append(item)

View file

@ -48,18 +48,18 @@ class KeyFinderPlugin(BeetsPlugin):
self.find_key(lib.items(ui.decargs(args)), write=ui.should_write()) self.find_key(lib.items(ui.decargs(args)), write=ui.should_write())
def imported(self, session, task): def imported(self, session, task):
self.find_key(task.items) self.find_key(task.imported_items())
def find_key(self, items, write=False): def find_key(self, items, write=False):
overwrite = self.config['overwrite'].get(bool) overwrite = self.config['overwrite'].get(bool)
bin = util.bytestring_path(self.config['bin'].get(unicode)) bin = self.config['bin'].as_str()
for item in items: for item in items:
if item['initial_key'] and not overwrite: if item['initial_key'] and not overwrite:
continue continue
try: try:
output = util.command_output([bin, b'-f', output = util.command_output([bin, '-f',
util.syspath(item.path)]) util.syspath(item.path)])
except (subprocess.CalledProcessError, OSError) as exc: except (subprocess.CalledProcessError, OSError) as exc:
self._log.error(u'execution failed: {0}', exc) self._log.error(u'execution failed: {0}', exc)
@ -73,7 +73,7 @@ class KeyFinderPlugin(BeetsPlugin):
key_raw = output.rsplit(None, 1)[-1] key_raw = output.rsplit(None, 1)[-1]
try: try:
key = key_raw.decode('utf8') key = util.text_string(key_raw)
except UnicodeDecodeError: except UnicodeDecodeError:
self._log.error(u'output is invalid UTF-8') self._log.error(u'output is invalid UTF-8')
continue continue

View file

@ -0,0 +1,98 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2017, Pauli Kettunen.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates a Kodi library whenever the beets library is changed.
This is based on the Plex Update plugin.
Put something like the following in your config.yaml to configure:
kodi:
host: localhost
port: 8080
user: user
pwd: secret
"""
from __future__ import division, absolute_import, print_function
import requests
from beets import config
from beets.plugins import BeetsPlugin
import six
def update_kodi(host, port, user, password):
"""Sends request to the Kodi api to start a library refresh.
"""
url = "http://{0}:{1}/jsonrpc".format(host, port)
"""Content-Type: application/json is mandatory
according to the kodi jsonrpc documentation"""
headers = {'Content-Type': 'application/json'}
# Create the payload. Id seems to be mandatory.
payload = {'jsonrpc': '2.0', 'method': 'AudioLibrary.Scan', 'id': 1}
r = requests.post(
url,
auth=(user, password),
json=payload,
headers=headers)
return r
class KodiUpdate(BeetsPlugin):
def __init__(self):
super(KodiUpdate, self).__init__()
# Adding defaults.
config['kodi'].add({
u'host': u'localhost',
u'port': 8080,
u'user': u'kodi',
u'pwd': u'kodi'})
config['kodi']['pwd'].redact = True
self.register_listener('database_change', self.listen_for_db_change)
def listen_for_db_change(self, lib, model):
"""Listens for beets db change and register the update"""
self.register_listener('cli_exit', self.update)
def update(self, lib):
"""When the client exists try to send refresh request to Kodi server.
"""
self._log.info(u'Requesting a Kodi library update...')
# Try to send update request.
try:
r = update_kodi(
config['kodi']['host'].get(),
config['kodi']['port'].get(),
config['kodi']['user'].get(),
config['kodi']['pwd'].get())
r.raise_for_status()
except requests.exceptions.RequestException as e:
self._log.warning(u'Kodi update failed: {0}',
six.text_type(e))
return
json = r.json()
if json.get('result') != 'OK':
self._log.warning(u'Kodi update failed: JSON response was {0!r}',
json)
return
self._log.info(u'Kodi update triggered')

View file

@ -14,6 +14,7 @@
# included in all copies or substantial portions of the Software. # included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
import six
"""Gets genres for imported music based on Last.fm tags. """Gets genres for imported music based on Last.fm tags.
@ -24,6 +25,7 @@ The scraper script used is available here:
https://gist.github.com/1241307 https://gist.github.com/1241307
""" """
import pylast import pylast
import codecs
import os import os
import yaml import yaml
import traceback import traceback
@ -71,7 +73,7 @@ def flatten_tree(elem, path, branches):
for sub in elem: for sub in elem:
flatten_tree(sub, path, branches) flatten_tree(sub, path, branches)
else: else:
branches.append(path + [unicode(elem)]) branches.append(path + [six.text_type(elem)])
def find_parents(candidate, branches): def find_parents(candidate, branches):
@ -107,6 +109,7 @@ class LastGenrePlugin(plugins.BeetsPlugin):
'force': True, 'force': True,
'auto': True, 'auto': True,
'separator': u', ', 'separator': u', ',
'prefer_specific': False,
}) })
self.setup() self.setup()
@ -126,9 +129,9 @@ class LastGenrePlugin(plugins.BeetsPlugin):
wl_filename = WHITELIST wl_filename = WHITELIST
if wl_filename: if wl_filename:
wl_filename = normpath(wl_filename) wl_filename = normpath(wl_filename)
with open(wl_filename, 'r') as f: with open(wl_filename, 'rb') as f:
for line in f: for line in f:
line = line.decode('utf8').strip().lower() line = line.decode('utf-8').strip().lower()
if line and not line.startswith(u'#'): if line and not line.startswith(u'#'):
self.whitelist.add(line) self.whitelist.add(line)
@ -139,7 +142,8 @@ class LastGenrePlugin(plugins.BeetsPlugin):
c14n_filename = C14N_TREE c14n_filename = C14N_TREE
if c14n_filename: if c14n_filename:
c14n_filename = normpath(c14n_filename) c14n_filename = normpath(c14n_filename)
genres_tree = yaml.load(open(c14n_filename, 'r')) with codecs.open(c14n_filename, 'r', encoding='utf-8') as f:
genres_tree = yaml.load(f)
flatten_tree(genres_tree, [], self.c14n_branches) flatten_tree(genres_tree, [], self.c14n_branches)
@property @property
@ -155,6 +159,25 @@ class LastGenrePlugin(plugins.BeetsPlugin):
elif source == 'artist': elif source == 'artist':
return 'artist', return 'artist',
def _get_depth(self, tag):
"""Find the depth of a tag in the genres tree.
"""
depth = None
for key, value in enumerate(self.c14n_branches):
if tag in value:
depth = value.index(tag)
break
return depth
def _sort_by_depth(self, tags):
"""Given a list of tags, sort the tags by their depths in the
genre tree.
"""
depth_tag_pairs = [(self._get_depth(t), t) for t in tags]
depth_tag_pairs = [e for e in depth_tag_pairs if e[0] is not None]
depth_tag_pairs.sort(reverse=True)
return [p[1] for p in depth_tag_pairs]
def _resolve_genres(self, tags): def _resolve_genres(self, tags):
"""Given a list of strings, return a genre by joining them into a """Given a list of strings, return a genre by joining them into a
single string and (optionally) canonicalizing each. single string and (optionally) canonicalizing each.
@ -176,17 +199,24 @@ class LastGenrePlugin(plugins.BeetsPlugin):
parents = [find_parents(tag, self.c14n_branches)[-1]] parents = [find_parents(tag, self.c14n_branches)[-1]]
tags_all += parents tags_all += parents
if len(tags_all) >= count: # Stop if we have enough tags already, unless we need to find
# the most specific tag (instead of the most popular).
if (not self.config['prefer_specific'] and
len(tags_all) >= count):
break break
tags = tags_all tags = tags_all
tags = deduplicate(tags) tags = deduplicate(tags)
# Sort the tags by specificity.
if self.config['prefer_specific']:
tags = self._sort_by_depth(tags)
# c14n only adds allowed genres but we may have had forbidden genres in # c14n only adds allowed genres but we may have had forbidden genres in
# the original tags list # the original tags list
tags = [x.title() for x in tags if self._is_allowed(x)] tags = [x.title() for x in tags if self._is_allowed(x)]
return self.config['separator'].get(unicode).join( return self.config['separator'].as_str().join(
tags[:self.config['count'].get(int)] tags[:self.config['count'].get(int)]
) )
@ -221,7 +251,8 @@ class LastGenrePlugin(plugins.BeetsPlugin):
if any(not s for s in args): if any(not s for s in args):
return None return None
key = u'{0}.{1}'.format(entity, u'-'.join(unicode(a) for a in args)) key = u'{0}.{1}'.format(entity,
u'-'.join(six.text_type(a) for a in args))
if key in self._genre_cache: if key in self._genre_cache:
return self._genre_cache[key] return self._genre_cache[key]
else: else:
@ -297,7 +328,7 @@ class LastGenrePlugin(plugins.BeetsPlugin):
result = None result = None
if isinstance(obj, library.Item): if isinstance(obj, library.Item):
result = self.fetch_artist_genre(obj) result = self.fetch_artist_genre(obj)
elif obj.albumartist != config['va_name'].get(unicode): elif obj.albumartist != config['va_name'].as_str():
result = self.fetch_album_artist_genre(obj) result = self.fetch_album_artist_genre(obj)
else: else:
# For "Various Artists", pick the most popular track genre. # For "Various Artists", pick the most popular track genre.
@ -400,7 +431,7 @@ class LastGenrePlugin(plugins.BeetsPlugin):
""" """
# Work around an inconsistency in pylast where # Work around an inconsistency in pylast where
# Album.get_top_tags() does not return TopItem instances. # Album.get_top_tags() does not return TopItem instances.
# https://code.google.com/p/pylast/issues/detail?id=85 # https://github.com/pylast/pylast/issues/86
if isinstance(obj, pylast.Album): if isinstance(obj, pylast.Album):
obj = super(pylast.Album, obj) obj = super(pylast.Album, obj)

View file

@ -23,7 +23,7 @@ from beets import config
from beets import plugins from beets import plugins
from beets.dbcore import types from beets.dbcore import types
API_URL = 'http://ws.audioscrobbler.com/2.0/' API_URL = 'https://ws.audioscrobbler.com/2.0/'
class LastImportPlugin(plugins.BeetsPlugin): class LastImportPlugin(plugins.BeetsPlugin):
@ -110,7 +110,7 @@ class CustomUser(pylast.User):
def import_lastfm(lib, log): def import_lastfm(lib, log):
user = config['lastfm']['user'].get(unicode) user = config['lastfm']['user'].as_str()
per_page = config['lastimport']['per_page'].get(int) per_page = config['lastimport']['per_page'].get(int)
if not user: if not user:
@ -192,7 +192,7 @@ def process_tracks(lib, tracks, log):
total_fails = 0 total_fails = 0
log.info(u'Received {0} tracks in this page, processing...', total) log.info(u'Received {0} tracks in this page, processing...', total)
for num in xrange(0, total): for num in range(0, total):
song = None song = None
trackid = tracks[num]['mbid'].strip() trackid = tracks[num]['mbid'].strip()
artist = tracks[num]['artist'].get('name', '').strip() artist = tracks[num]['artist'].get('name', '').strip()

View file

@ -19,14 +19,18 @@
from __future__ import absolute_import, division, print_function from __future__ import absolute_import, division, print_function
import difflib import difflib
import errno
import itertools import itertools
import json import json
import struct
import os.path
import re import re
import requests import requests
import unicodedata import unicodedata
import urllib from unidecode import unidecode
import warnings import warnings
from HTMLParser import HTMLParseError import six
from six.moves import urllib
try: try:
from bs4 import SoupStrainer, BeautifulSoup from bs4 import SoupStrainer, BeautifulSoup
@ -40,9 +44,18 @@ try:
except ImportError: except ImportError:
HAS_LANGDETECT = False HAS_LANGDETECT = False
try:
# PY3: HTMLParseError was removed in 3.5 as strict mode
# was deprecated in 3.3.
# https://docs.python.org/3.3/library/html.parser.html
from six.moves.html_parser import HTMLParseError
except ImportError:
class HTMLParseError(Exception):
pass
from beets import plugins from beets import plugins
from beets import ui from beets import ui
import beets
DIV_RE = re.compile(r'<(/?)div>?', re.I) DIV_RE = re.compile(r'<(/?)div>?', re.I)
COMMENT_RE = re.compile(r'<!--.*-->', re.S) COMMENT_RE = re.compile(r'<!--.*-->', re.S)
@ -62,20 +75,62 @@ URL_CHARACTERS = {
u'\u2016': u'-', u'\u2016': u'-',
u'\u2026': u'...', u'\u2026': u'...',
} }
USER_AGENT = 'beets/{}'.format(beets.__version__)
# The content for the base index.rst generated in ReST mode.
REST_INDEX_TEMPLATE = u'''Lyrics
======
* :ref:`Song index <genindex>`
* :ref:`search`
Artist index:
.. toctree::
:maxdepth: 1
:glob:
artists/*
'''
# The content for the base conf.py generated.
REST_CONF_TEMPLATE = u'''# -*- coding: utf-8 -*-
master_doc = 'index'
project = u'Lyrics'
copyright = u'none'
author = u'Various Authors'
latex_documents = [
(master_doc, 'Lyrics.tex', project,
author, 'manual'),
]
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
epub_exclude_files = ['search.html']
epub_tocdepth = 1
epub_tocdup = False
'''
# Utilities. # Utilities.
def unichar(i):
try:
return six.unichr(i)
except ValueError:
return struct.pack('i', i).decode('utf-32')
def unescape(text): def unescape(text):
"""Resolve &#xxx; HTML entities (and some others).""" """Resolve &#xxx; HTML entities (and some others)."""
if isinstance(text, bytes): if isinstance(text, bytes):
text = text.decode('utf8', 'ignore') text = text.decode('utf-8', 'ignore')
out = text.replace(u'&nbsp;', u' ') out = text.replace(u'&nbsp;', u' ')
def replchar(m): def replchar(m):
num = m.group(1) num = m.group(1)
return unichr(int(num)) return unichar(int(num))
out = re.sub(u"&#(\d+);", replchar, out) out = re.sub(u"&#(\d+);", replchar, out)
return out return out
@ -93,7 +148,6 @@ def extract_text_in(html, starttag):
"""Extract the text from a <DIV> tag in the HTML starting with """Extract the text from a <DIV> tag in the HTML starting with
``starttag``. Returns None if parsing fails. ``starttag``. Returns None if parsing fails.
""" """
# Strip off the leading text before opening tag. # Strip off the leading text before opening tag.
try: try:
_, html = html.split(starttag, 1) _, html = html.split(starttag, 1)
@ -134,30 +188,33 @@ def search_pairs(item):
and featured artists from the strings and add them as candidates. and featured artists from the strings and add them as candidates.
The method also tries to split multiple titles separated with `/`. The method also tries to split multiple titles separated with `/`.
""" """
def generate_alternatives(string, patterns):
"""Generate string alternatives by extracting first matching group for
each given pattern.
"""
alternatives = [string]
for pattern in patterns:
match = re.search(pattern, string, re.IGNORECASE)
if match:
alternatives.append(match.group(1))
return alternatives
title, artist = item.title, item.artist title, artist = item.title, item.artist
titles = [title]
artists = [artist]
patterns = [
# Remove any featuring artists from the artists name # Remove any featuring artists from the artists name
pattern = r"(.*?) {0}".format(plugins.feat_tokens()) r"(.*?) {0}".format(plugins.feat_tokens())]
match = re.search(pattern, artist, re.IGNORECASE) artists = generate_alternatives(artist, patterns)
if match:
artists.append(match.group(1))
patterns = [
# Remove a parenthesized suffix from a title string. Common # Remove a parenthesized suffix from a title string. Common
# examples include (live), (remix), and (acoustic). # examples include (live), (remix), and (acoustic).
pattern = r"(.+?)\s+[(].*[)]$" r"(.+?)\s+[(].*[)]$",
match = re.search(pattern, title, re.IGNORECASE)
if match:
titles.append(match.group(1))
# Remove any featuring artists from the title # Remove any featuring artists from the title
pattern = r"(.*?) {0}".format(plugins.feat_tokens(for_artist=False)) r"(.*?) {0}".format(plugins.feat_tokens(for_artist=False)),
for title in titles[:]: # Remove part of title after colon ':' for songs with subtitles
match = re.search(pattern, title, re.IGNORECASE) r"(.+?)\s*:.*"]
if match: titles = generate_alternatives(title, patterns)
titles.append(match.group(1))
# Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe) # Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe)
# and each of them. # and each of them.
@ -170,6 +227,24 @@ def search_pairs(item):
return itertools.product(artists, multi_titles) return itertools.product(artists, multi_titles)
def slug(text):
"""Make a URL-safe, human-readable version of the given text
This will do the following:
1. decode unicode characters into ASCII
2. shift everything to lowercase
3. strip whitespace
4. replace other non-word characters with dashes
5. strip extra dashes
This somewhat duplicates the :func:`Google.slugify` function but
slugify is not as generic as this one, which can be reused
elsewhere.
"""
return re.sub(r'\W+', '-', unidecode(text).lower().strip()).strip('-')
class Backend(object): class Backend(object):
def __init__(self, config, log): def __init__(self, config, log):
self._log = log self._log = log
@ -177,11 +252,11 @@ class Backend(object):
@staticmethod @staticmethod
def _encode(s): def _encode(s):
"""Encode the string for inclusion in a URL""" """Encode the string for inclusion in a URL"""
if isinstance(s, unicode): if isinstance(s, six.text_type):
for char, repl in URL_CHARACTERS.items(): for char, repl in URL_CHARACTERS.items():
s = s.replace(char, repl) s = s.replace(char, repl)
s = s.encode('utf8', 'ignore') s = s.encode('utf-8', 'ignore')
return urllib.quote(s) return urllib.parse.quote(s)
def build_url(self, artist, title): def build_url(self, artist, title):
return self.URL_PATTERN % (self._encode(artist.title()), return self.URL_PATTERN % (self._encode(artist.title()),
@ -198,7 +273,9 @@ class Backend(object):
# We're not overly worried about the NSA MITMing our lyrics scraper # We're not overly worried about the NSA MITMing our lyrics scraper
with warnings.catch_warnings(): with warnings.catch_warnings():
warnings.simplefilter('ignore') warnings.simplefilter('ignore')
r = requests.get(url, verify=False) r = requests.get(url, verify=False, headers={
'User-Agent': USER_AGENT,
})
except requests.RequestException as exc: except requests.RequestException as exc:
self._log.debug(u'lyrics request failed: {0}', exc) self._log.debug(u'lyrics request failed: {0}', exc)
return return
@ -218,12 +295,12 @@ class SymbolsReplaced(Backend):
'>': 'Greater_Than', '>': 'Greater_Than',
'#': 'Number_', '#': 'Number_',
r'[\[\{]': '(', r'[\[\{]': '(',
r'[\[\{]': ')' r'[\]\}]': ')',
} }
@classmethod @classmethod
def _encode(cls, s): def _encode(cls, s):
for old, new in cls.REPLACEMENTS.iteritems(): for old, new in cls.REPLACEMENTS.items():
s = re.sub(old, new, s) s = re.sub(old, new, s)
return super(SymbolsReplaced, cls)._encode(s) return super(SymbolsReplaced, cls)._encode(s)
@ -238,104 +315,97 @@ class MusiXmatch(SymbolsReplaced):
def fetch(self, artist, title): def fetch(self, artist, title):
url = self.build_url(artist, title) url = self.build_url(artist, title)
html = self.fetch_url(url) html = self.fetch_url(url)
if not html: if not html:
return return
lyrics = extract_text_between(html, if "We detected that your IP is blocked" in html:
'"body":', '"language":') self._log.warning(u'we are blocked at MusixMatch: url %s failed'
return lyrics.strip(',"').replace('\\n', '\n') % url)
return
html_part = html.split('<p class="mxm-lyrics__content')[-1]
lyrics = extract_text_between(html_part, '>', '</p>')
lyrics = lyrics.strip(',"').replace('\\n', '\n')
# another odd case: sometimes only that string remains, for
# missing songs. this seems to happen after being blocked
# above, when filling in the CAPTCHA.
if "Instant lyrics for all your music." in lyrics:
return
return lyrics
class Genius(Backend): class Genius(Backend):
"""Fetch lyrics from Genius via genius-api.""" """Fetch lyrics from Genius via genius-api.
Simply adapted from
bigishdata.com/2016/09/27/getting-song-lyrics-from-geniuss-api-scraping/
"""
base_url = "https://api.genius.com"
def __init__(self, config, log): def __init__(self, config, log):
super(Genius, self).__init__(config, log) super(Genius, self).__init__(config, log)
self.api_key = config['genius_api_key'].get(unicode) self.api_key = config['genius_api_key'].as_str()
self.headers = {'Authorization': "Bearer %s" % self.api_key} self.headers = {
'Authorization': "Bearer %s" % self.api_key,
'User-Agent': USER_AGENT,
}
def search_genius(self, artist, title): def lyrics_from_song_api_path(self, song_api_path):
query = u"%s %s" % (artist, title) song_url = self.base_url + song_api_path
url = u'https://api.genius.com/search?q=%s' \ response = requests.get(song_url, headers=self.headers)
% (urllib.quote(query.encode('utf8'))) json = response.json()
path = json["response"]["song"]["path"]
self._log.debug(u'genius: requesting search {}', url) # Gotta go regular html scraping... come on Genius.
page_url = "https://genius.com" + path
try: try:
req = requests.get( page = requests.get(page_url)
url,
headers=self.headers,
allow_redirects=True
)
req.raise_for_status()
except requests.RequestException as exc: except requests.RequestException as exc:
self._log.debug(u'genius: request error: {}', exc) self._log.debug(u'Genius page request for {0} failed: {1}',
page_url, exc)
return None return None
html = BeautifulSoup(page.text, "html.parser")
try: # Remove script tags that they put in the middle of the lyrics.
return req.json() [h.extract() for h in html('script')]
except ValueError:
self._log.debug(u'genius: invalid response: {}', req.text)
return None
def get_lyrics(self, link): # At least Genius is nice and has a tag called 'lyrics'!
url = u'http://genius-api.com/api/lyricsInfo' # Updated css where the lyrics are based in HTML.
lyrics = html.find("div", class_="lyrics").get_text()
self._log.debug(u'genius: requesting lyrics for link {}', link)
try:
req = requests.post(
url,
data={'link': link},
headers=self.headers,
allow_redirects=True
)
req.raise_for_status()
except requests.RequestException as exc:
self._log.debug(u'genius: request error: {}', exc)
return None
try:
return req.json()
except ValueError:
self._log.debug(u'genius: invalid response: {}', req.text)
return None
def build_lyric_string(self, lyrics):
if 'lyrics' not in lyrics:
return
sections = lyrics['lyrics']['sections']
lyrics_list = []
for section in sections:
lyrics_list.append(section['name'])
lyrics_list.append('\n')
for verse in section['verses']:
if 'content' in verse:
lyrics_list.append(verse['content'])
return ''.join(lyrics_list)
def fetch(self, artist, title):
search_data = self.search_genius(artist, title)
if not search_data:
return
if not search_data['meta']['status'] == 200:
return
else:
records = search_data['response']['hits']
if not records:
return
record_url = records[0]['result']['url']
lyric_data = self.get_lyrics(record_url)
if not lyric_data:
return
lyrics = self.build_lyric_string(lyric_data)
return lyrics return lyrics
def fetch(self, artist, title):
search_url = self.base_url + "/search"
data = {'q': title}
try:
response = requests.get(search_url, data=data,
headers=self.headers)
except requests.RequestException as exc:
self._log.debug(u'Genius API request failed: {0}', exc)
return None
try:
json = response.json()
except ValueError:
self._log.debug(u'Genius API request returned invalid JSON')
return None
song_info = None
for hit in json["response"]["hits"]:
if hit["result"]["primary_artist"]["name"] == artist:
song_info = hit
break
if song_info:
song_api_path = song_info["result"]["api_path"]
return self.lyrics_from_song_api_path(song_api_path)
class LyricsWiki(SymbolsReplaced): class LyricsWiki(SymbolsReplaced):
"""Fetch lyrics from LyricsWiki.""" """Fetch lyrics from LyricsWiki."""
URL_PATTERN = 'http://lyrics.wikia.com/%s:%s' URL_PATTERN = 'http://lyrics.wikia.com/%s:%s'
def fetch(self, artist, title): def fetch(self, artist, title):
@ -354,38 +424,6 @@ class LyricsWiki(SymbolsReplaced):
return lyrics return lyrics
class LyricsCom(Backend):
"""Fetch lyrics from Lyrics.com."""
URL_PATTERN = 'http://www.lyrics.com/%s-lyrics-%s.html'
NOT_FOUND = (
'Sorry, we do not have the lyric',
'Submit Lyrics',
)
@classmethod
def _encode(cls, s):
s = re.sub(r'[^\w\s-]', '', s)
s = re.sub(r'\s+', '-', s)
return super(LyricsCom, cls)._encode(s).lower()
def fetch(self, artist, title):
url = self.build_url(artist, title)
html = self.fetch_url(url)
if not html:
return
lyrics = extract_text_between(html, '<div id="lyrics" class="SCREENO'
'NLY" itemprop="description">', '</div>')
if not lyrics:
return
for not_found_str in self.NOT_FOUND:
if not_found_str in lyrics:
return
parts = lyrics.split('\n---\nLyrics powered by', 1)
if parts:
return parts[0]
def remove_credits(text): def remove_credits(text):
"""Remove first/last line of text if it contains the word 'lyrics' """Remove first/last line of text if it contains the word 'lyrics'
eg 'Lyrics by songsdatabase.com' eg 'Lyrics by songsdatabase.com'
@ -459,10 +497,11 @@ def scrape_lyrics_from_html(html):
class Google(Backend): class Google(Backend):
"""Fetch lyrics from Google search results.""" """Fetch lyrics from Google search results."""
def __init__(self, config, log): def __init__(self, config, log):
super(Google, self).__init__(config, log) super(Google, self).__init__(config, log)
self.api_key = config['google_API_key'].get(unicode) self.api_key = config['google_API_key'].as_str()
self.engine_id = config['google_engine_ID'].get(unicode) self.engine_id = config['google_engine_ID'].as_str()
def is_lyrics(self, text, artist=None): def is_lyrics(self, text, artist=None):
"""Determine whether the text seems to be valid lyrics. """Determine whether the text seems to be valid lyrics.
@ -503,7 +542,7 @@ class Google(Backend):
try: try:
text = unicodedata.normalize('NFKD', text).encode('ascii', text = unicodedata.normalize('NFKD', text).encode('ascii',
'ignore') 'ignore')
text = unicode(re.sub('[-\s]+', ' ', text)) text = six.text_type(re.sub('[-\s]+', ' ', text.decode('utf-8')))
except UnicodeDecodeError: except UnicodeDecodeError:
self._log.exception(u"Failing to normalize '{0}'", text) self._log.exception(u"Failing to normalize '{0}'", text)
return text return text
@ -542,14 +581,20 @@ class Google(Backend):
query = u"%s %s" % (artist, title) query = u"%s %s" % (artist, title)
url = u'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \ url = u'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \
% (self.api_key, self.engine_id, % (self.api_key, self.engine_id,
urllib.quote(query.encode('utf8'))) urllib.parse.quote(query.encode('utf-8')))
data = urllib.urlopen(url) data = self.fetch_url(url)
data = json.load(data) if not data:
self._log.debug(u'google backend returned no data')
return None
try:
data = json.loads(data)
except ValueError as exc:
self._log.debug(u'google backend returned malformed JSON: {}', exc)
if 'error' in data: if 'error' in data:
reason = data['error']['errors'][0]['reason'] reason = data['error']['errors'][0]['reason']
self._log.debug(u'google lyrics backend error: {0}', reason) self._log.debug(u'google backend error: {0}', reason)
return return None
if 'items' in data.keys(): if 'items' in data.keys():
for item in data['items']: for item in data['items']:
@ -570,11 +615,10 @@ class Google(Backend):
class LyricsPlugin(plugins.BeetsPlugin): class LyricsPlugin(plugins.BeetsPlugin):
SOURCES = ['google', 'lyricwiki', 'lyrics.com', 'musixmatch'] SOURCES = ['google', 'lyricwiki', 'musixmatch', 'genius']
SOURCE_BACKENDS = { SOURCE_BACKENDS = {
'google': Google, 'google': Google,
'lyricwiki': LyricsWiki, 'lyricwiki': LyricsWiki,
'lyrics.com': LyricsCom,
'musixmatch': MusiXmatch, 'musixmatch': MusiXmatch,
'genius': Genius, 'genius': Genius,
} }
@ -594,6 +638,7 @@ class LyricsPlugin(plugins.BeetsPlugin):
"76V-uFL5jks5dNvcGCdarqFjDhP9c", "76V-uFL5jks5dNvcGCdarqFjDhP9c",
'fallback': None, 'fallback': None,
'force': False, 'force': False,
'local': False,
'sources': self.SOURCES, 'sources': self.SOURCES,
}) })
self.config['bing_client_secret'].redact = True self.config['bing_client_secret'].redact = True
@ -601,28 +646,47 @@ class LyricsPlugin(plugins.BeetsPlugin):
self.config['google_engine_ID'].redact = True self.config['google_engine_ID'].redact = True
self.config['genius_api_key'].redact = True self.config['genius_api_key'].redact = True
# State information for the ReST writer.
# First, the current artist we're writing.
self.artist = u'Unknown artist'
# The current album: False means no album yet.
self.album = False
# The current rest file content. None means the file is not
# open yet.
self.rest = None
available_sources = list(self.SOURCES) available_sources = list(self.SOURCES)
sources = plugins.sanitize_choices( sources = plugins.sanitize_choices(
self.config['sources'].as_str_seq(), available_sources) self.config['sources'].as_str_seq(), available_sources)
if 'google' in sources: if 'google' in sources:
if not self.config['google_API_key'].get(): if not self.config['google_API_key'].get():
self._log.warn(u'To use the google lyrics source, you must ' # We log a *debug* message here because the default
u'provide an API key in the configuration. ' # configuration includes `google`. This way, the source
u'See the documentation for further details.') # is silent by default but can be enabled just by
# setting an API key.
self._log.debug(u'Disabling google source: '
u'no API key configured.')
sources.remove('google') sources.remove('google')
if not HAS_BEAUTIFUL_SOUP: elif not HAS_BEAUTIFUL_SOUP:
self._log.warn(u'To use the google lyrics source, you must ' self._log.warning(u'To use the google lyrics source, you must '
u'install the beautifulsoup4 module. See the ' u'install the beautifulsoup4 module. See '
u'documentation for further details.') u'the documentation for further details.')
sources.remove('google') sources.remove('google')
if 'genius' in sources and not HAS_BEAUTIFUL_SOUP:
self._log.debug(
u'The Genius backend requires BeautifulSoup, which is not '
u'installed, so the source is disabled.'
)
sources.remove('genius')
self.config['bing_lang_from'] = [ self.config['bing_lang_from'] = [
x.lower() for x in self.config['bing_lang_from'].as_str_seq()] x.lower() for x in self.config['bing_lang_from'].as_str_seq()]
self.bing_auth_token = None self.bing_auth_token = None
if not HAS_LANGDETECT and self.config['bing_client_secret'].get(): if not HAS_LANGDETECT and self.config['bing_client_secret'].get():
self._log.warn(u'To use bing translations, you need to ' self._log.warning(u'To use bing translations, you need to '
u'install the langdetect module. See the ' u'install the langdetect module. See the '
u'documentation for further details.') u'documentation for further details.')
@ -633,14 +697,14 @@ class LyricsPlugin(plugins.BeetsPlugin):
params = { params = {
'client_id': 'beets', 'client_id': 'beets',
'client_secret': self.config['bing_client_secret'], 'client_secret': self.config['bing_client_secret'],
'scope': 'http://api.microsofttranslator.com', 'scope': "https://api.microsofttranslator.com",
'grant_type': 'client_credentials', 'grant_type': 'client_credentials',
} }
oauth_url = 'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13' oauth_url = 'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13'
oauth_token = json.loads(requests.post( oauth_token = json.loads(requests.post(
oauth_url, oauth_url,
data=urllib.urlencode(params)).content) data=urllib.parse.urlencode(params)).content)
if 'access_token' in oauth_token: if 'access_token' in oauth_token:
return "Bearer " + oauth_token['access_token'] return "Bearer " + oauth_token['access_token']
else: else:
@ -654,27 +718,106 @@ class LyricsPlugin(plugins.BeetsPlugin):
action='store_true', default=False, action='store_true', default=False,
help=u'print lyrics to console', help=u'print lyrics to console',
) )
cmd.parser.add_option(
u'-r', u'--write-rest', dest='writerest',
action='store', default=None, metavar='dir',
help=u'write lyrics to given directory as ReST files',
)
cmd.parser.add_option( cmd.parser.add_option(
u'-f', u'--force', dest='force_refetch', u'-f', u'--force', dest='force_refetch',
action='store_true', default=False, action='store_true', default=False,
help=u'always re-download lyrics', help=u'always re-download lyrics',
) )
cmd.parser.add_option(
u'-l', u'--local', dest='local_only',
action='store_true', default=False,
help=u'do not fetch missing lyrics',
)
def func(lib, opts, args): def func(lib, opts, args):
# The "write to files" option corresponds to the # The "write to files" option corresponds to the
# import_write config value. # import_write config value.
write = ui.should_write() write = ui.should_write()
if opts.writerest:
self.writerest_indexes(opts.writerest)
for item in lib.items(ui.decargs(args)): for item in lib.items(ui.decargs(args)):
if not opts.local_only and not self.config['local']:
self.fetch_item_lyrics( self.fetch_item_lyrics(
lib, item, write, lib, item, write,
opts.force_refetch or self.config['force'], opts.force_refetch or self.config['force'],
) )
if opts.printlyr and item.lyrics: if item.lyrics:
if opts.printlyr:
ui.print_(item.lyrics) ui.print_(item.lyrics)
if opts.writerest:
self.writerest(opts.writerest, item)
if opts.writerest:
# flush last artist
self.writerest(opts.writerest, None)
ui.print_(u'ReST files generated. to build, use one of:')
ui.print_(u' sphinx-build -b html %s _build/html'
% opts.writerest)
ui.print_(u' sphinx-build -b epub %s _build/epub'
% opts.writerest)
ui.print_((u' sphinx-build -b latex %s _build/latex '
u'&& make -C _build/latex all-pdf')
% opts.writerest)
cmd.func = func cmd.func = func
return [cmd] return [cmd]
def writerest(self, directory, item):
"""Write the item to an ReST file
This will keep state (in the `rest` variable) in order to avoid
writing continuously to the same files.
"""
if item is None or slug(self.artist) != slug(item.albumartist):
if self.rest is not None:
path = os.path.join(directory, 'artists',
slug(self.artist) + u'.rst')
with open(path, 'wb') as output:
output.write(self.rest.encode('utf-8'))
self.rest = None
if item is None:
return
self.artist = item.albumartist.strip()
self.rest = u"%s\n%s\n\n.. contents::\n :local:\n\n" \
% (self.artist,
u'=' * len(self.artist))
if self.album != item.album:
tmpalbum = self.album = item.album.strip()
if self.album == '':
tmpalbum = u'Unknown album'
self.rest += u"%s\n%s\n\n" % (tmpalbum, u'-' * len(tmpalbum))
title_str = u":index:`%s`" % item.title.strip()
block = u'| ' + item.lyrics.replace(u'\n', u'\n| ')
self.rest += u"%s\n%s\n\n%s\n\n" % (title_str,
u'~' * len(title_str),
block)
def writerest_indexes(self, directory):
"""Write conf.py and index.rst files necessary for Sphinx
We write minimal configurations that are necessary for Sphinx
to operate. We do not overwrite existing files so that
customizations are respected."""
try:
os.makedirs(os.path.join(directory, 'artists'))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
indexfile = os.path.join(directory, 'index.rst')
if not os.path.exists(indexfile):
with open(indexfile, 'w') as output:
output.write(REST_INDEX_TEMPLATE)
conffile = os.path.join(directory, 'conf.py')
if not os.path.exists(conffile):
with open(conffile, 'w') as output:
output.write(REST_CONF_TEMPLATE)
def imported(self, session, task): def imported(self, session, task):
"""Import hook for fetching lyrics automatically. """Import hook for fetching lyrics automatically.
""" """
@ -685,7 +828,8 @@ class LyricsPlugin(plugins.BeetsPlugin):
def fetch_item_lyrics(self, lib, item, write, force): def fetch_item_lyrics(self, lib, item, write, force):
"""Fetch and store lyrics for a single item. If ``write``, then the """Fetch and store lyrics for a single item. If ``write``, then the
lyrics will also be written to the file itself.""" lyrics will also be written to the file itself.
"""
# Skip if the item already has lyrics. # Skip if the item already has lyrics.
if not force and item.lyrics: if not force and item.lyrics:
self._log.info(u'lyrics already present: {0}', item) self._log.info(u'lyrics already present: {0}', item)
@ -740,7 +884,7 @@ class LyricsPlugin(plugins.BeetsPlugin):
if self.bing_auth_token: if self.bing_auth_token:
# Extract unique lines to limit API request size per song # Extract unique lines to limit API request size per song
text_lines = set(text.split('\n')) text_lines = set(text.split('\n'))
url = ('http://api.microsofttranslator.com/v2/Http.svc/' url = ('https://api.microsofttranslator.com/v2/Http.svc/'
'Translate?text=%s&to=%s' % ('|'.join(text_lines), to_lang)) 'Translate?text=%s&to=%s' % ('|'.join(text_lines), to_lang))
r = requests.get(url, r = requests.get(url,
headers={"Authorization ": self.bing_auth_token}) headers={"Authorization ": self.bing_auth_token})
@ -751,7 +895,7 @@ class LyricsPlugin(plugins.BeetsPlugin):
self.bing_auth_token = None self.bing_auth_token = None
return self.append_translation(text, to_lang) return self.append_translation(text, to_lang)
return text return text
lines_translated = ET.fromstring(r.text.encode('utf8')).text lines_translated = ET.fromstring(r.text.encode('utf-8')).text
# Use a translation mapping dict to build resulting lyrics # Use a translation mapping dict to build resulting lyrics
translations = dict(zip(text_lines, lines_translated.split('|'))) translations = dict(zip(text_lines, lines_translated.split('|')))
result = '' result = ''

View file

@ -1,17 +1,17 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
# Copyright (c) 2011, Jeffrey Aylesworth <jeffrey@aylesworth.ca> # This file is part of beets.
# Copyright (c) 2011, Jeffrey Aylesworth <mail@jeffrey.red>
# #
# Permission to use, copy, modify, and/or distribute this software for any # Permission is hereby granted, free of charge, to any person obtaining
# purpose with or without fee is hereby granted, provided that the above # a copy of this software and associated documentation files (the
# copyright notice and this permission notice appear in all copies. # "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
# #
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # The above copyright notice and this permission notice shall be
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # included in all copies or substantial portions of the Software.
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
@ -24,6 +24,7 @@ import musicbrainzngs
import re import re
SUBMISSION_CHUNK_SIZE = 200 SUBMISSION_CHUNK_SIZE = 200
FETCH_CHUNK_SIZE = 100
UUID_REGEX = r'^[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}$' UUID_REGEX = r'^[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}$'
@ -57,44 +58,93 @@ class MusicBrainzCollectionPlugin(BeetsPlugin):
super(MusicBrainzCollectionPlugin, self).__init__() super(MusicBrainzCollectionPlugin, self).__init__()
config['musicbrainz']['pass'].redact = True config['musicbrainz']['pass'].redact = True
musicbrainzngs.auth( musicbrainzngs.auth(
config['musicbrainz']['user'].get(unicode), config['musicbrainz']['user'].as_str(),
config['musicbrainz']['pass'].get(unicode), config['musicbrainz']['pass'].as_str(),
) )
self.config.add({'auto': False}) self.config.add({
'auto': False,
'collection': u'',
'remove': False,
})
if self.config['auto']: if self.config['auto']:
self.import_stages = [self.imported] self.import_stages = [self.imported]
def _get_collection(self):
collections = mb_call(musicbrainzngs.get_collections)
if not collections['collection-list']:
raise ui.UserError(u'no collections exist for user')
# Get all collection IDs, avoiding event collections
collection_ids = [x['id'] for x in collections['collection-list']]
if not collection_ids:
raise ui.UserError(u'No collection found.')
# Check that the collection exists so we can present a nice error
collection = self.config['collection'].as_str()
if collection:
if collection not in collection_ids:
raise ui.UserError(u'invalid collection ID: {}'
.format(collection))
return collection
# No specified collection. Just return the first collection ID
return collection_ids[0]
def _get_albums_in_collection(self, id):
def _fetch(offset):
res = mb_call(
musicbrainzngs.get_releases_in_collection,
id,
limit=FETCH_CHUNK_SIZE,
offset=offset
)['collection']
return [x['id'] for x in res['release-list']], res['release-count']
offset = 0
albums_in_collection, release_count = _fetch(offset)
for i in range(0, release_count, FETCH_CHUNK_SIZE):
albums_in_collection += _fetch(offset)[0]
offset += FETCH_CHUNK_SIZE
return albums_in_collection
def commands(self): def commands(self):
mbupdate = Subcommand('mbupdate', mbupdate = Subcommand('mbupdate',
help=u'Update MusicBrainz collection') help=u'Update MusicBrainz collection')
mbupdate.parser.add_option('-r', '--remove',
action='store_true',
default=None,
dest='remove',
help='Remove albums not in beets library')
mbupdate.func = self.update_collection mbupdate.func = self.update_collection
return [mbupdate] return [mbupdate]
def remove_missing(self, collection_id, lib_albums):
lib_ids = set([x.mb_albumid for x in lib_albums])
albums_in_collection = self._get_albums_in_collection(collection_id)
remove_me = list(set(albums_in_collection) - lib_ids)
for i in range(0, len(remove_me), FETCH_CHUNK_SIZE):
chunk = remove_me[i:i + FETCH_CHUNK_SIZE]
mb_call(
musicbrainzngs.remove_releases_from_collection,
collection_id, chunk
)
def update_collection(self, lib, opts, args): def update_collection(self, lib, opts, args):
self.update_album_list(lib.albums()) self.config.set_args(opts)
remove_missing = self.config['remove'].get(bool)
self.update_album_list(lib, lib.albums(), remove_missing)
def imported(self, session, task): def imported(self, session, task):
"""Add each imported album to the collection. """Add each imported album to the collection.
""" """
if task.is_album: if task.is_album:
self.update_album_list([task.album]) self.update_album_list(session.lib, [task.album])
def update_album_list(self, album_list): def update_album_list(self, lib, album_list, remove_missing=False):
"""Update the MusicBrainz colleciton from a list of Beets albums """Update the MusicBrainz collection from a list of Beets albums
""" """
# Get the available collections. collection_id = self._get_collection()
collections = mb_call(musicbrainzngs.get_collections)
if not collections['collection-list']:
raise ui.UserError(u'no collections exist for user')
# Get the first release collection. MusicBrainz also has event
# collections, so we need to avoid adding to those.
for collection in collections['collection-list']:
if 'release-count' in collection:
collection_id = collection['id']
break
else:
raise ui.UserError(u'No collection found.')
# Get a list of all the album IDs. # Get a list of all the album IDs.
album_ids = [] album_ids = []
@ -111,4 +161,6 @@ class MusicBrainzCollectionPlugin(BeetsPlugin):
u'Updating MusicBrainz collection {0}...', collection_id u'Updating MusicBrainz collection {0}...', collection_id
) )
submit_albums(collection_id, album_ids) submit_albums(collection_id, album_ids)
if remove_missing:
self.remove_missing(collection_id, lib.albums())
self._log.info(u'...MusicBrainz collection updated.') self._log.info(u'...MusicBrainz collection updated.')

View file

@ -36,7 +36,7 @@ class MBSubmitPlugin(BeetsPlugin):
super(MBSubmitPlugin, self).__init__() super(MBSubmitPlugin, self).__init__()
self.config.add({ self.config.add({
'format': '$track. $title - $artist ($length)', 'format': u'$track. $title - $artist ($length)',
'threshold': 'medium', 'threshold': 'medium',
}) })
@ -56,5 +56,5 @@ class MBSubmitPlugin(BeetsPlugin):
return [PromptChoice(u'p', u'Print tracks', self.print_tracks)] return [PromptChoice(u'p', u'Print tracks', self.print_tracks)]
def print_tracks(self, session, task): def print_tracks(self, session, task):
for i in task.items: for i in sorted(task.items, key=lambda i: i.track):
print_data(None, i, self.config['format'].get()) print_data(None, i, self.config['format'].as_str())

View file

@ -117,23 +117,30 @@ class MBSyncPlugin(BeetsPlugin):
album_formatted) album_formatted)
continue continue
# Map recording MBIDs to their information. Recordings can appear # Map release track and recording MBIDs to their information.
# multiple times on a release, so each MBID maps to a list of # Recordings can appear multiple times on a release, so each MBID
# TrackInfo objects. # maps to a list of TrackInfo objects.
releasetrack_index = dict()
track_index = defaultdict(list) track_index = defaultdict(list)
for track_info in album_info.tracks: for track_info in album_info.tracks:
releasetrack_index[track_info.release_track_id] = track_info
track_index[track_info.track_id].append(track_info) track_index[track_info.track_id].append(track_info)
# Construct a track mapping according to MBIDs. This should work # Construct a track mapping according to MBIDs (release track MBIDs
# for albums that have missing or extra tracks. If there are # first, if available, and recording MBIDs otherwise). This should
# multiple copies of a recording, they are disambiguated using # work for albums that have missing or extra tracks.
# their disc and track number.
mapping = {} mapping = {}
for item in items: for item in items:
if item.mb_releasetrackid and \
item.mb_releasetrackid in releasetrack_index:
mapping[item] = releasetrack_index[item.mb_releasetrackid]
else:
candidates = track_index[item.mb_trackid] candidates = track_index[item.mb_trackid]
if len(candidates) == 1: if len(candidates) == 1:
mapping[item] = candidates[0] mapping[item] = candidates[0]
else: else:
# If there are multiple copies of a recording, they are
# disambiguated using their disc and track number.
for c in candidates: for c in candidates:
if (c.medium_index == item.track and if (c.medium_index == item.track and
c.medium == item.disc): c.medium == item.disc):
@ -145,10 +152,13 @@ class MBSyncPlugin(BeetsPlugin):
with lib.transaction(): with lib.transaction():
autotag.apply_metadata(album_info, mapping) autotag.apply_metadata(album_info, mapping)
changed = False changed = False
# Find any changed item to apply MusicBrainz changes to album.
any_changed_item = items[0]
for item in items: for item in items:
item_changed = ui.show_model_changes(item) item_changed = ui.show_model_changes(item)
changed |= item_changed changed |= item_changed
if item_changed: if item_changed:
any_changed_item = item
apply_item_changes(lib, item, move, pretend, write) apply_item_changes(lib, item, move, pretend, write)
if not changed: if not changed:
@ -158,7 +168,7 @@ class MBSyncPlugin(BeetsPlugin):
if not pretend: if not pretend:
# Update album structure to reflect an item in it. # Update album structure to reflect an item in it.
for key in library.Album.item_keys: for key in library.Album.item_keys:
a[key] = items[0][key] a[key] = any_changed_item[key]
a.store() a.store()
# Move album art (and any inconsistent items). # Move album art (and any inconsistent items).

View file

@ -24,6 +24,7 @@ from importlib import import_module
from beets.util.confit import ConfigValueError from beets.util.confit import ConfigValueError
from beets import ui from beets import ui
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
import six
METASYNC_MODULE = 'beetsplug.metasync' METASYNC_MODULE = 'beetsplug.metasync'
@ -35,9 +36,7 @@ SOURCES = {
} }
class MetaSource(object): class MetaSource(six.with_metaclass(ABCMeta, object)):
__metaclass__ = ABCMeta
def __init__(self, config, log): def __init__(self, config, log):
self.item_types = {} self.item_types = {}
self.config = config self.config = config

View file

@ -21,7 +21,7 @@ from __future__ import division, absolute_import, print_function
from os.path import basename from os.path import basename
from datetime import datetime from datetime import datetime
from time import mktime from time import mktime
from xml.sax.saxutils import escape from xml.sax.saxutils import quoteattr
from beets.util import displayable_path from beets.util import displayable_path
from beets.dbcore import types from beets.dbcore import types
@ -51,7 +51,7 @@ class Amarok(MetaSource):
queryXML = u'<query version="1.0"> \ queryXML = u'<query version="1.0"> \
<filters> \ <filters> \
<and><include field="filename" value="%s" /></and> \ <and><include field="filename" value=%s /></and> \
</filters> \ </filters> \
</query>' </query>'
@ -71,7 +71,9 @@ class Amarok(MetaSource):
# for the patch relative to the mount point. But the full path is part # for the patch relative to the mount point. But the full path is part
# of the result set. So query for the filename and then try to match # of the result set. So query for the filename and then try to match
# the correct item from the results we get back # the correct item from the results we get back
results = self.collection.Query(self.queryXML % escape(basename(path))) results = self.collection.Query(
self.queryXML % quoteattr(basename(path))
)
for result in results: for result in results:
if result['xesam:url'] != path: if result['xesam:url'] != path:
continue continue

View file

@ -23,8 +23,8 @@ import os
import shutil import shutil
import tempfile import tempfile
import plistlib import plistlib
import urllib
from urlparse import urlparse from six.moves.urllib.parse import urlparse, unquote
from time import mktime from time import mktime
from beets import util from beets import util
@ -57,7 +57,7 @@ def _norm_itunes_path(path):
# E.g., '\\G:\\Music\\bar' needs to be stripped to 'G:\\Music\\bar' # E.g., '\\G:\\Music\\bar' needs to be stripped to 'G:\\Music\\bar'
return util.bytestring_path(os.path.normpath( return util.bytestring_path(os.path.normpath(
urllib.unquote(urlparse(path).path)).lstrip('\\')).lower() unquote(urlparse(path).path)).lstrip('\\')).lower()
class Itunes(MetaSource): class Itunes(MetaSource):

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
# This file is part of beets. # This file is part of beets.
# Copyright 2016, Pedro Silva. # Copyright 2016, Pedro Silva.
# Copyright 2017, Quentin Young.
# #
# Permission is hereby granted, free of charge, to any person obtaining # Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the # a copy of this software and associated documentation files (the
@ -17,11 +18,16 @@
""" """
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
import musicbrainzngs
from musicbrainzngs.musicbrainz import MusicBrainzError
from collections import defaultdict
from beets.autotag import hooks from beets.autotag import hooks
from beets.library import Item from beets.library import Item
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
from beets.ui import decargs, print_, Subcommand from beets.ui import decargs, print_, Subcommand
from beets import config from beets import config
from beets.dbcore import types
def _missing_count(album): def _missing_count(album):
@ -81,12 +87,18 @@ def _item(track_info, album_info, album_id):
class MissingPlugin(BeetsPlugin): class MissingPlugin(BeetsPlugin):
"""List missing tracks """List missing tracks
""" """
album_types = {
'missing': types.INTEGER,
}
def __init__(self): def __init__(self):
super(MissingPlugin, self).__init__() super(MissingPlugin, self).__init__()
self.config.add({ self.config.add({
'count': False, 'count': False,
'total': False, 'total': False,
'album': False,
}) })
self.album_template_fields['missing'] = _missing_count self.album_template_fields['missing'] = _missing_count
@ -100,16 +112,32 @@ class MissingPlugin(BeetsPlugin):
self._command.parser.add_option( self._command.parser.add_option(
u'-t', u'--total', dest='total', action='store_true', u'-t', u'--total', dest='total', action='store_true',
help=u'count total of missing tracks') help=u'count total of missing tracks')
self._command.parser.add_option(
u'-a', u'--album', dest='album', action='store_true',
help=u'show missing albums for artist instead of tracks')
self._command.parser.add_format_option() self._command.parser.add_format_option()
def commands(self): def commands(self):
def _miss(lib, opts, args): def _miss(lib, opts, args):
self.config.set_args(opts) self.config.set_args(opts)
albms = self.config['album'].get()
helper = self._missing_albums if albms else self._missing_tracks
helper(lib, decargs(args))
self._command.func = _miss
return [self._command]
def _missing_tracks(self, lib, query):
"""Print a listing of tracks missing from each album in the library
matching query.
"""
albums = lib.albums(query)
count = self.config['count'].get() count = self.config['count'].get()
total = self.config['total'].get() total = self.config['total'].get()
fmt = config['format_album' if count else 'format_item'].get() fmt = config['format_album' if count else 'format_item'].get()
albums = lib.albums(decargs(args))
if total: if total:
print(sum([_missing_count(a) for a in albums])) print(sum([_missing_count(a) for a in albums]))
return return
@ -127,13 +155,67 @@ class MissingPlugin(BeetsPlugin):
for item in self._missing(album): for item in self._missing(album):
print_(format(item, fmt)) print_(format(item, fmt))
self._command.func = _miss def _missing_albums(self, lib, query):
return [self._command] """Print a listing of albums missing from each artist in the library
matching query.
"""
total = self.config['total'].get()
albums = lib.albums(query)
# build dict mapping artist to list of their albums in library
albums_by_artist = defaultdict(list)
for alb in albums:
artist = (alb['albumartist'], alb['mb_albumartistid'])
albums_by_artist[artist].append(alb)
total_missing = 0
# build dict mapping artist to list of all albums
for artist, albums in albums_by_artist.items():
if artist[1] is None or artist[1] == "":
albs_no_mbid = [u"'" + a['album'] + u"'" for a in albums]
self._log.info(
u"No musicbrainz ID for artist '{}' found in album(s) {}; "
"skipping", artist[0], u", ".join(albs_no_mbid)
)
continue
try:
resp = musicbrainzngs.browse_release_groups(artist=artist[1])
release_groups = resp['release-group-list']
except MusicBrainzError as err:
self._log.info(
u"Couldn't fetch info for artist '{}' ({}) - '{}'",
artist[0], artist[1], err
)
continue
missing = []
present = []
for rg in release_groups:
missing.append(rg)
for alb in albums:
if alb['mb_releasegroupid'] == rg['id']:
missing.remove(rg)
present.append(rg)
break
total_missing += len(missing)
if total:
continue
missing_titles = {rg['title'] for rg in missing}
for release_title in missing_titles:
print_(u"{} - {}".format(artist[0], release_title))
if total:
print(total_missing)
def _missing(self, album): def _missing(self, album):
"""Query MusicBrainz to determine items missing from `album`. """Query MusicBrainz to determine items missing from `album`.
""" """
item_mbids = map(lambda x: x.mb_trackid, album.items()) item_mbids = [x.mb_trackid for x in album.items()]
if len([i for i in album.items()]) < album.albumtotal: if len([i for i in album.items()]) < album.albumtotal:
# fetch missing items # fetch missing items
# TODO: Implement caching that without breaking other stuff # TODO: Implement caching that without breaking other stuff

View file

@ -40,36 +40,24 @@ mpd_config = config['mpd']
def is_url(path): def is_url(path):
"""Try to determine if the path is an URL. """Try to determine if the path is an URL.
""" """
if isinstance(path, bytes): # if it's bytes, then it's a path
return False
return path.split('://', 1)[0] in ['http', 'https'] return path.split('://', 1)[0] in ['http', 'https']
# Use the MPDClient internals to get unicode.
# see http://www.tarmack.eu/code/mpdunicode.py for the general idea
class MPDClient(mpd.MPDClient):
def _write_command(self, command, args=[]):
args = [unicode(arg).encode('utf-8') for arg in args]
super(MPDClient, self)._write_command(command, args)
def _read_line(self):
line = super(MPDClient, self)._read_line()
if line is not None:
return line.decode('utf-8')
return None
class MPDClientWrapper(object): class MPDClientWrapper(object):
def __init__(self, log): def __init__(self, log):
self._log = log self._log = log
self.music_directory = ( self.music_directory = (
mpd_config['music_directory'].get(unicode)) mpd_config['music_directory'].as_str())
self.client = MPDClient() self.client = mpd.MPDClient(use_unicode=True)
def connect(self): def connect(self):
"""Connect to the MPD. """Connect to the MPD.
""" """
host = mpd_config['host'].get(unicode) host = mpd_config['host'].as_str()
port = mpd_config['port'].get(int) port = mpd_config['port'].get(int)
if host[0] in ['/', '~']: if host[0] in ['/', '~']:
@ -81,7 +69,7 @@ class MPDClientWrapper(object):
except socket.error as e: except socket.error as e:
raise ui.UserError(u'could not connect to MPD: {0}'.format(e)) raise ui.UserError(u'could not connect to MPD: {0}'.format(e))
password = mpd_config['password'].get(unicode) password = mpd_config['password'].as_str()
if password: if password:
try: try:
self.client.password(password) self.client.password(password)
@ -268,21 +256,30 @@ class MPDStats(object):
if not path: if not path:
return return
if is_url(path):
self._log.info(u'playing stream {0}', displayable_path(path))
return
played, duration = map(int, status['time'].split(':', 1)) played, duration = map(int, status['time'].split(':', 1))
remaining = duration - played remaining = duration - played
if self.now_playing and self.now_playing['path'] != path: if self.now_playing:
skipped = self.handle_song_change(self.now_playing) if self.now_playing['path'] != path:
# mpd responds twice on a natural new song start self.handle_song_change(self.now_playing)
going_to_happen_twice = not skipped
else: else:
going_to_happen_twice = False # In case we got mpd play event with same song playing
# multiple times,
# assume low diff means redundant second play event
# after natural song start.
diff = abs(time.time() - self.now_playing['started'])
if diff <= self.time_threshold:
return
if self.now_playing['path'] == path and played == 0:
self.handle_song_change(self.now_playing)
if is_url(path):
self._log.info(u'playing stream {0}', displayable_path(path))
self.now_playing = None
return
if not going_to_happen_twice:
self._log.info(u'playing {0}', displayable_path(path)) self._log.info(u'playing {0}', displayable_path(path))
self.now_playing = { self.now_playing = {
@ -328,7 +325,7 @@ class MPDStatsPlugin(plugins.BeetsPlugin):
'music_directory': config['directory'].as_filename(), 'music_directory': config['directory'].as_filename(),
'rating': True, 'rating': True,
'rating_mix': 0.75, 'rating_mix': 0.75,
'host': u'localhost', 'host': os.environ.get('MPD_HOST', u'localhost'),
'port': 6600, 'port': 6600,
'password': u'', 'password': u'',
}) })
@ -353,11 +350,11 @@ class MPDStatsPlugin(plugins.BeetsPlugin):
# Overrides for MPD settings. # Overrides for MPD settings.
if opts.host: if opts.host:
mpd_config['host'] = opts.host.decode('utf8') mpd_config['host'] = opts.host.decode('utf-8')
if opts.port: if opts.port:
mpd_config['host'] = int(opts.port) mpd_config['host'] = int(opts.port)
if opts.password: if opts.password:
mpd_config['password'] = opts.password.decode('utf8') mpd_config['password'] = opts.password.decode('utf-8')
try: try:
MPDStats(lib, self._log).run() MPDStats(lib, self._log).run()

View file

@ -27,6 +27,7 @@ from beets.plugins import BeetsPlugin
import os import os
import socket import socket
from beets import config from beets import config
import six
# No need to introduce a dependency on an MPD library for such a # No need to introduce a dependency on an MPD library for such a
@ -34,14 +35,14 @@ from beets import config
# easier. # easier.
class BufferedSocket(object): class BufferedSocket(object):
"""Socket abstraction that allows reading by line.""" """Socket abstraction that allows reading by line."""
def __init__(self, host, port, sep='\n'): def __init__(self, host, port, sep=b'\n'):
if host[0] in ['/', '~']: if host[0] in ['/', '~']:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(os.path.expanduser(host)) self.sock.connect(os.path.expanduser(host))
else: else:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port)) self.sock.connect((host, port))
self.buf = '' self.buf = b''
self.sep = sep self.sep = sep
def readline(self): def readline(self):
@ -50,11 +51,11 @@ class BufferedSocket(object):
if not data: if not data:
break break
self.buf += data self.buf += data
if '\n' in self.buf: if self.sep in self.buf:
res, self.buf = self.buf.split(self.sep, 1) res, self.buf = self.buf.split(self.sep, 1)
return res + self.sep return res + self.sep
else: else:
return '' return b''
def send(self, data): def send(self, data):
self.sock.send(data) self.sock.send(data)
@ -67,7 +68,7 @@ class MPDUpdatePlugin(BeetsPlugin):
def __init__(self): def __init__(self):
super(MPDUpdatePlugin, self).__init__() super(MPDUpdatePlugin, self).__init__()
config['mpd'].add({ config['mpd'].add({
'host': u'localhost', 'host': os.environ.get('MPD_HOST', u'localhost'),
'port': 6600, 'port': 6600,
'password': u'', 'password': u'',
}) })
@ -86,9 +87,9 @@ class MPDUpdatePlugin(BeetsPlugin):
def update(self, lib): def update(self, lib):
self.update_mpd( self.update_mpd(
config['mpd']['host'].get(unicode), config['mpd']['host'].as_str(),
config['mpd']['port'].get(int), config['mpd']['port'].get(int),
config['mpd']['password'].get(unicode), config['mpd']['password'].as_str(),
) )
def update_mpd(self, host='localhost', port=6600, password=None): def update_mpd(self, host='localhost', port=6600, password=None):
@ -101,28 +102,28 @@ class MPDUpdatePlugin(BeetsPlugin):
s = BufferedSocket(host, port) s = BufferedSocket(host, port)
except socket.error as e: except socket.error as e:
self._log.warning(u'MPD connection failed: {0}', self._log.warning(u'MPD connection failed: {0}',
unicode(e.strerror)) six.text_type(e.strerror))
return return
resp = s.readline() resp = s.readline()
if 'OK MPD' not in resp: if b'OK MPD' not in resp:
self._log.warning(u'MPD connection failed: {0!r}', resp) self._log.warning(u'MPD connection failed: {0!r}', resp)
return return
if password: if password:
s.send('password "%s"\n' % password) s.send(b'password "%s"\n' % password.encode('utf8'))
resp = s.readline() resp = s.readline()
if 'OK' not in resp: if b'OK' not in resp:
self._log.warning(u'Authentication failed: {0!r}', resp) self._log.warning(u'Authentication failed: {0!r}', resp)
s.send('close\n') s.send(b'close\n')
s.close() s.close()
return return
s.send('update\n') s.send(b'update\n')
resp = s.readline() resp = s.readline()
if 'updating_db' not in resp: if b'updating_db' not in resp:
self._log.warning(u'Update failed: {0!r}', resp) self._log.warning(u'Update failed: {0!r}', resp)
s.send('close\n') s.send(b'close\n')
s.close() s.close()
self._log.info(u'Database updated.') self._log.info(u'Database updated.')

View file

@ -13,24 +13,43 @@ import os
from beets import config, util from beets import config, util
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
from beets.util import ancestry from beets.util import ancestry
import six
def convert_perm(perm): def convert_perm(perm):
"""If the perm is a int it will first convert it to a string and back """Convert a string to an integer, interpreting the text as octal.
to an oct int. Else it just converts it to oct. Or, if `perm` is an integer, reinterpret it as an octal number that
has been "misinterpreted" as decimal.
""" """
if isinstance(perm, int): if isinstance(perm, six.integer_types):
return int(bytes(perm), 8) perm = six.text_type(perm)
else:
return int(perm, 8) return int(perm, 8)
def check_permissions(path, permission): def check_permissions(path, permission):
"""Checks the permissions of a path. """Check whether the file's permissions equal the given vector.
Return a boolean.
""" """
return oct(os.stat(path).st_mode & 0o777) == oct(permission) return oct(os.stat(path).st_mode & 0o777) == oct(permission)
def assert_permissions(path, permission, log):
"""Check whether the file's permissions are as expected, otherwise,
log a warning message. Return a boolean indicating the match, like
`check_permissions`.
"""
if not check_permissions(util.syspath(path), permission):
log.warning(
u'could not set permissions on {}',
util.displayable_path(path),
)
log.debug(
u'set permissions to {}, but permissions are now {}',
permission,
os.stat(util.syspath(path)).st_mode & 0o777,
)
def dirs_in_library(library, item): def dirs_in_library(library, item):
"""Creates a list of ancestor directories in the beets library path. """Creates a list of ancestor directories in the beets library path.
""" """
@ -45,22 +64,22 @@ class Permissions(BeetsPlugin):
# Adding defaults. # Adding defaults.
self.config.add({ self.config.add({
u'file': 644, u'file': '644',
u'dir': 755 u'dir': '755',
}) })
self.register_listener('item_imported', permissions) self.register_listener('item_imported', self.fix)
self.register_listener('album_imported', permissions) self.register_listener('album_imported', self.fix)
def fix(self, lib, item=None, album=None):
def permissions(lib, item=None, album=None): """Fix the permissions for an imported Item or Album.
"""Running the permission fixer.
""" """
# Getting the config. # Get the configured permissions. The user can specify this either a
# string (in YAML quotes) or, for convenience, as an integer so the
# quotes can be omitted. In the latter case, we need to reinterpret the
# integer as octal, not decimal.
file_perm = config['permissions']['file'].get() file_perm = config['permissions']['file'].get()
dir_perm = config['permissions']['dir'].get() dir_perm = config['permissions']['dir'].get()
# Converts permissions to oct.
file_perm = convert_perm(file_perm) file_perm = convert_perm(file_perm)
dir_perm = convert_perm(dir_perm) dir_perm = convert_perm(dir_perm)
@ -77,13 +96,14 @@ def permissions(lib, item=None, album=None):
for path in file_chmod_queue: for path in file_chmod_queue:
# Changing permissions on the destination file. # Changing permissions on the destination file.
os.chmod(util.bytestring_path(path), file_perm) self._log.debug(
u'setting file permissions on {}',
util.displayable_path(path),
)
os.chmod(util.syspath(path), file_perm)
# Checks if the destination path has the permissions configured. # Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), file_perm): assert_permissions(path, file_perm, self._log)
message = u'There was a problem setting permission on {}'.format(
path)
print(message)
# Adding directories to the directory chmod queue. # Adding directories to the directory chmod queue.
dir_chmod_queue.update( dir_chmod_queue.update(
@ -93,10 +113,11 @@ def permissions(lib, item=None, album=None):
# Change permissions for the directories. # Change permissions for the directories.
for path in dir_chmod_queue: for path in dir_chmod_queue:
# Chaning permissions on the destination directory. # Chaning permissions on the destination directory.
os.chmod(util.bytestring_path(path), dir_perm) self._log.debug(
u'setting directory permissions on {}',
util.displayable_path(path),
)
os.chmod(util.syspath(path), dir_perm)
# Checks if the destination path has the permissions configured. # Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), dir_perm): assert_permissions(path, dir_perm, self._log)
message = u'There was a problem setting permission on {}'.format(
path)
print(message)

View file

@ -19,17 +19,41 @@ from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
from beets.ui import Subcommand from beets.ui import Subcommand
from beets.ui.commands import PromptChoice
from beets import config from beets import config
from beets import ui from beets import ui
from beets import util from beets import util
from os.path import relpath from os.path import relpath
from tempfile import NamedTemporaryFile from tempfile import NamedTemporaryFile
import subprocess
# Indicate where arguments should be inserted into the command string. # Indicate where arguments should be inserted into the command string.
# If this is missing, they're placed at the end. # If this is missing, they're placed at the end.
ARGS_MARKER = '$args' ARGS_MARKER = '$args'
def play(command_str, selection, paths, open_args, log, item_type='track',
keep_open=False):
"""Play items in paths with command_str and optional arguments. If
keep_open, return to beets, otherwise exit once command runs.
"""
# Print number of tracks or albums to be played, log command to be run.
item_type += 's' if len(selection) > 1 else ''
ui.print_(u'Playing {0} {1}.'.format(len(selection), item_type))
log.debug(u'executing command: {} {!r}', command_str, open_args)
try:
if keep_open:
command = util.shlex_split(command_str)
command = command + open_args
subprocess.call(command)
else:
util.interactive_open(open_args, command_str)
except OSError as exc:
raise ui.UserError(
"Could not play the query: {0}".format(exc))
class PlayPlugin(BeetsPlugin): class PlayPlugin(BeetsPlugin):
def __init__(self): def __init__(self):
@ -40,11 +64,12 @@ class PlayPlugin(BeetsPlugin):
'use_folders': False, 'use_folders': False,
'relative_to': None, 'relative_to': None,
'raw': False, 'raw': False,
# Backwards compatibility. See #1803 and line 74 'warning_threshold': 100,
'warning_threshold': -2,
'warning_treshold': 100,
}) })
self.register_listener('before_choose_candidate',
self.before_choose_candidate_listener)
def commands(self): def commands(self):
play_command = Subcommand( play_command = Subcommand(
'play', 'play',
@ -56,41 +81,22 @@ class PlayPlugin(BeetsPlugin):
action='store', action='store',
help=u'add additional arguments to the command', help=u'add additional arguments to the command',
) )
play_command.func = self.play_music play_command.parser.add_option(
u'-y', u'--yes',
action="store_true",
help=u'skip the warning threshold',
)
play_command.func = self._play_command
return [play_command] return [play_command]
def play_music(self, lib, opts, args): def _play_command(self, lib, opts, args):
"""Execute query, create temporary playlist and execute player """The CLI command function for `beet play`. Create a list of paths
command passing that playlist, at request insert optional arguments. from query, determine if tracks or albums are to be played.
""" """
command_str = config['play']['command'].get()
if not command_str:
command_str = util.open_anything()
use_folders = config['play']['use_folders'].get(bool) use_folders = config['play']['use_folders'].get(bool)
relative_to = config['play']['relative_to'].get() relative_to = config['play']['relative_to'].get()
raw = config['play']['raw'].get(bool)
warning_threshold = config['play']['warning_threshold'].get(int)
# We use -2 as a default value for warning_threshold to detect if it is
# set or not. We can't use a falsey value because it would have an
# actual meaning in the configuration of this plugin, and we do not use
# -1 because some people might use it as a value to obtain no warning,
# which wouldn't be that bad of a practice.
if warning_threshold == -2:
# if warning_threshold has not been set by user, look for
# warning_treshold, to preserve backwards compatibility. See #1803.
# warning_treshold has the correct default value of 100.
warning_threshold = config['play']['warning_treshold'].get(int)
if relative_to: if relative_to:
relative_to = util.normpath(relative_to) relative_to = util.normpath(relative_to)
# Add optional arguments to the player command.
if opts.args:
if ARGS_MARKER in command_str:
command_str = command_str.replace(ARGS_MARKER, opts.args)
else:
command_str = u"{} {}".format(command_str, opts.args)
# Perform search by album and add folders rather than tracks to # Perform search by album and add folders rather than tracks to
# playlist. # playlist.
if opts.album: if opts.album:
@ -110,46 +116,95 @@ class PlayPlugin(BeetsPlugin):
else: else:
selection = lib.items(ui.decargs(args)) selection = lib.items(ui.decargs(args))
paths = [item.path for item in selection] paths = [item.path for item in selection]
if relative_to:
paths = [relpath(path, relative_to) for path in paths]
item_type = 'track' item_type = 'track'
item_type += 's' if len(selection) > 1 else '' if relative_to:
paths = [relpath(path, relative_to) for path in paths]
if not selection: if not selection:
ui.print_(ui.colorize('text_warning', ui.print_(ui.colorize('text_warning',
u'No {0} to play.'.format(item_type))) u'No {0} to play.'.format(item_type)))
return return
open_args = self._playlist_or_paths(paths)
command_str = self._command_str(opts.args)
# Check if the selection exceeds configured threshold. If True,
# cancel, otherwise proceed with play command.
if opts.yes or not self._exceeds_threshold(
selection, command_str, open_args, item_type):
play(command_str, selection, paths, open_args, self._log,
item_type)
def _command_str(self, args=None):
"""Create a command string from the config command and optional args.
"""
command_str = config['play']['command'].get()
if not command_str:
return util.open_anything()
# Add optional arguments to the player command.
if args:
if ARGS_MARKER in command_str:
return command_str.replace(ARGS_MARKER, args)
else:
return u"{} {}".format(command_str, args)
else:
# Don't include the marker in the command.
return command_str.replace(" " + ARGS_MARKER, "")
def _playlist_or_paths(self, paths):
"""Return either the raw paths of items or a playlist of the items.
"""
if config['play']['raw']:
return paths
else:
return [self._create_tmp_playlist(paths)]
def _exceeds_threshold(self, selection, command_str, open_args,
item_type='track'):
"""Prompt user whether to abort if playlist exceeds threshold. If
True, cancel playback. If False, execute play command.
"""
warning_threshold = config['play']['warning_threshold'].get(int)
# Warn user before playing any huge playlists. # Warn user before playing any huge playlists.
if warning_threshold and len(selection) > warning_threshold: if warning_threshold and len(selection) > warning_threshold:
if len(selection) > 1:
item_type += 's'
ui.print_(ui.colorize( ui.print_(ui.colorize(
'text_warning', 'text_warning',
u'You are about to queue {0} {1}.'.format( u'You are about to queue {0} {1}.'.format(
len(selection), item_type))) len(selection), item_type)))
if ui.input_options(('Continue', 'Abort')) == 'a': if ui.input_options((u'Continue', u'Abort')) == 'a':
return return True
ui.print_(u'Playing {0} {1}.'.format(len(selection), item_type)) return False
if raw:
open_args = paths
else:
open_args = [self._create_tmp_playlist(paths)]
self._log.debug(u'executing command: {} {}', command_str,
b' '.join(open_args))
try:
util.interactive_open(open_args, command_str)
except OSError as exc:
raise ui.UserError(
"Could not play the query: {0}".format(exc))
def _create_tmp_playlist(self, paths_list): def _create_tmp_playlist(self, paths_list):
"""Create a temporary .m3u file. Return the filename. """Create a temporary .m3u file. Return the filename.
""" """
m3u = NamedTemporaryFile('w', suffix='.m3u', delete=False) m3u = NamedTemporaryFile('wb', suffix='.m3u', delete=False)
for item in paths_list: for item in paths_list:
m3u.write(item + b'\n') m3u.write(item + b'\n')
m3u.close() m3u.close()
return m3u.name return m3u.name
def before_choose_candidate_listener(self, session, task):
"""Append a "Play" choice to the interactive importer prompt.
"""
return [PromptChoice('y', 'plaY', self.importer_play)]
def importer_play(self, session, task):
"""Get items from current import task and send to play function.
"""
selection = task.items
paths = [item.path for item in selection]
open_args = self._playlist_or_paths(paths)
command_str = self._command_str()
if not self._exceeds_threshold(selection, command_str, open_args):
play(command_str, selection, paths, open_args, self._log,
keep_open=True)

View file

@ -12,9 +12,8 @@ Put something like the following in your config.yaml to configure:
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
import requests import requests
from urlparse import urljoin
from urllib import urlencode
import xml.etree.ElementTree as ET import xml.etree.ElementTree as ET
from six.moves.urllib.parse import urljoin, urlencode
from beets import config from beets import config
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
@ -68,6 +67,7 @@ class PlexUpdate(BeetsPlugin):
u'token': u'', u'token': u'',
u'library_name': u'Music'}) u'library_name': u'Music'})
config['plex']['token'].redact = True
self.register_listener('database_change', self.listen_for_db_change) self.register_listener('database_change', self.listen_for_db_change)
def listen_for_db_change(self, lib, model): def listen_for_db_change(self, lib, model):

View file

@ -24,56 +24,124 @@ from operator import attrgetter
from itertools import groupby from itertools import groupby
def random_item(lib, opts, args): def _length(obj, album):
query = decargs(args) """Get the duration of an item or album.
"""
if opts.album: if album:
objs = list(lib.albums(query)) return sum(i.length for i in obj.items())
else: else:
objs = list(lib.items(query)) return obj.length
if opts.equal_chance:
def _equal_chance_permutation(objs, field='albumartist'):
"""Generate (lazily) a permutation of the objects where every group
with equal values for `field` have an equal chance of appearing in
any given position.
"""
# Group the objects by artist so we can sample from them. # Group the objects by artist so we can sample from them.
key = attrgetter('albumartist') key = attrgetter(field)
objs.sort(key=key) objs.sort(key=key)
objs_by_artists = {} objs_by_artists = {}
for artist, v in groupby(objs, key): for artist, v in groupby(objs, key):
objs_by_artists[artist] = list(v) objs_by_artists[artist] = list(v)
objs = [] # While we still have artists with music to choose from, pick one
for _ in range(opts.number): # randomly and pick a track from that artist.
# Terminate early if we're out of objects to select. while objs_by_artists:
if not objs_by_artists:
break
# Choose an artist and an object for that artist, removing # Choose an artist and an object for that artist, removing
# this choice from the pool. # this choice from the pool.
artist = random.choice(objs_by_artists.keys()) artist = random.choice(list(objs_by_artists.keys()))
objs_from_artist = objs_by_artists[artist] objs_from_artist = objs_by_artists[artist]
i = random.randint(0, len(objs_from_artist) - 1) i = random.randint(0, len(objs_from_artist) - 1)
objs.append(objs_from_artist.pop(i)) yield objs_from_artist.pop(i)
# Remove the artist if we've used up all of its objects. # Remove the artist if we've used up all of its objects.
if not objs_from_artist: if not objs_from_artist:
del objs_by_artists[artist] del objs_by_artists[artist]
else:
number = min(len(objs), opts.number)
objs = random.sample(objs, number)
for item in objs: def _take(iter, num):
print_(format(item)) """Return a list containing the first `num` values in `iter` (or
fewer, if the iterable ends early).
"""
out = []
for val in iter:
out.append(val)
num -= 1
if num <= 0:
break
return out
def _take_time(iter, secs, album):
"""Return a list containing the first values in `iter`, which should
be Item or Album objects, that add up to the given amount of time in
seconds.
"""
out = []
total_time = 0.0
for obj in iter:
length = _length(obj, album)
if total_time + length <= secs:
out.append(obj)
total_time += length
return out
def random_objs(objs, album, number=1, time=None, equal_chance=False):
"""Get a random subset of the provided `objs`.
If `number` is provided, produce that many matches. Otherwise, if
`time` is provided, instead select a list whose total time is close
to that number of minutes. If `equal_chance` is true, give each
artist an equal chance of being included so that artists with more
songs are not represented disproportionately.
"""
# Permute the objects either in a straightforward way or an
# artist-balanced way.
if equal_chance:
perm = _equal_chance_permutation(objs)
else:
perm = objs
random.shuffle(perm) # N.B. This shuffles the original list.
# Select objects by time our count.
if time:
return _take_time(perm, time * 60, album)
else:
return _take(perm, number)
def random_func(lib, opts, args):
"""Select some random items or albums and print the results.
"""
# Fetch all the objects matching the query into a list.
query = decargs(args)
if opts.album:
objs = list(lib.albums(query))
else:
objs = list(lib.items(query))
# Print a random subset.
objs = random_objs(objs, opts.album, opts.number, opts.time,
opts.equal_chance)
for obj in objs:
print_(format(obj))
random_cmd = Subcommand('random', random_cmd = Subcommand('random',
help=u'chose a random track or album') help=u'choose a random track or album')
random_cmd.parser.add_option( random_cmd.parser.add_option(
u'-n', u'--number', action='store', type="int", u'-n', u'--number', action='store', type="int",
help=u'number of objects to choose', default=1) help=u'number of objects to choose', default=1)
random_cmd.parser.add_option( random_cmd.parser.add_option(
u'-e', u'--equal-chance', action='store_true', u'-e', u'--equal-chance', action='store_true',
help=u'each artist has the same chance') help=u'each artist has the same chance')
random_cmd.parser.add_option(
u'-t', u'--time', action='store', type="float",
help=u'total length in minutes of objects to choose')
random_cmd.parser.add_all_common_options() random_cmd.parser.add_all_common_options()
random_cmd.func = random_item random_cmd.func = random_func
class Random(BeetsPlugin): class Random(BeetsPlugin):

View file

@ -18,15 +18,15 @@ from __future__ import division, absolute_import, print_function
import subprocess import subprocess
import os import os
import collections import collections
import itertools
import sys import sys
import warnings import warnings
import re import xml.parsers.expat
from six.moves import zip
from beets import logging
from beets import ui from beets import ui
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
from beets.util import syspath, command_output, displayable_path from beets.util import (syspath, command_output, bytestring_path,
displayable_path, py3_path)
# Utilities. # Utilities.
@ -60,7 +60,7 @@ def call(args):
except UnicodeEncodeError: except UnicodeEncodeError:
# Due to a bug in Python 2's subprocess on Windows, Unicode # Due to a bug in Python 2's subprocess on Windows, Unicode
# filenames can fail to encode on that platform. See: # filenames can fail to encode on that platform. See:
# http://code.google.com/p/beets/issues/detail?id=499 # https://github.com/google-code-export/beets/issues/499
raise ReplayGainError(u"argument encoding failed") raise ReplayGainError(u"argument encoding failed")
@ -102,9 +102,9 @@ class Bs1770gainBackend(Backend):
'method': 'replaygain', 'method': 'replaygain',
}) })
self.chunk_at = config['chunk_at'].as_number() self.chunk_at = config['chunk_at'].as_number()
self.method = b'--' + bytes(config['method'].get(unicode)) self.method = '--' + config['method'].as_str()
cmd = b'bs1770gain' cmd = 'bs1770gain'
try: try:
call([cmd, self.method]) call([cmd, self.method])
self.command = cmd self.command = cmd
@ -194,13 +194,14 @@ class Bs1770gainBackend(Backend):
""" """
# Construct shell command. # Construct shell command.
cmd = [self.command] cmd = [self.command]
cmd = cmd + [self.method] cmd += [self.method]
cmd = cmd + [b'-it'] cmd += ['--xml', '-p']
# Workaround for Windows: the underlying tool fails on paths # Workaround for Windows: the underlying tool fails on paths
# with the \\?\ prefix, so we don't use it here. This # with the \\?\ prefix, so we don't use it here. This
# prevents the backend from working with long paths. # prevents the backend from working with long paths.
args = cmd + [syspath(i.path, prefix=False) for i in items] args = cmd + [syspath(i.path, prefix=False) for i in items]
path_list = [i.path for i in items]
# Invoke the command. # Invoke the command.
self._log.debug( self._log.debug(
@ -209,40 +210,65 @@ class Bs1770gainBackend(Backend):
output = call(args) output = call(args)
self._log.debug(u'analysis finished: {0}', output) self._log.debug(u'analysis finished: {0}', output)
results = self.parse_tool_output(output, results = self.parse_tool_output(output, path_list, is_album)
len(items) + is_album)
self._log.debug(u'{0} items, {1} results', len(items), len(results)) self._log.debug(u'{0} items, {1} results', len(items), len(results))
return results return results
def parse_tool_output(self, text, num_lines): def parse_tool_output(self, text, path_list, is_album):
"""Given the output from bs1770gain, parse the text and """Given the output from bs1770gain, parse the text and
return a list of dictionaries return a list of dictionaries
containing information about each analyzed file. containing information about each analyzed file.
""" """
out = [] per_file_gain = {}
data = text.decode('utf8', errors='ignore') album_gain = {} # mutable variable so it can be set from handlers
regex = re.compile( parser = xml.parsers.expat.ParserCreate(encoding='utf-8')
ur'(\s{2,2}\[\d+\/\d+\].*?|\[ALBUM\].*?)' state = {'file': None, 'gain': None, 'peak': None}
'(?=\s{2,2}\[\d+\/\d+\]|\s{2,2}\[ALBUM\]'
':|done\.\s)', re.DOTALL | re.UNICODE)
results = re.findall(regex, data)
for parts in results[0:num_lines]:
part = parts.split(b'\n')
if len(part) == 0:
self._log.debug(u'bad tool output: {0!r}', text)
raise ReplayGainError(u'bs1770gain failed')
def start_element_handler(name, attrs):
if name == u'track':
state['file'] = bytestring_path(attrs[u'file'])
if state['file'] in per_file_gain:
raise ReplayGainError(
u'duplicate filename in bs1770gain output')
elif name == u'integrated':
state['gain'] = float(attrs[u'lu'])
elif name == u'sample-peak':
state['peak'] = float(attrs[u'factor'])
def end_element_handler(name):
if name == u'track':
if state['gain'] is None or state['peak'] is None:
raise ReplayGainError(u'could not parse gain or peak from '
'the output of bs1770gain')
per_file_gain[state['file']] = Gain(state['gain'],
state['peak'])
state['gain'] = state['peak'] = None
elif name == u'summary':
if state['gain'] is None or state['peak'] is None:
raise ReplayGainError(u'could not parse gain or peak from '
'the output of bs1770gain')
album_gain["album"] = Gain(state['gain'], state['peak'])
state['gain'] = state['peak'] = None
parser.StartElementHandler = start_element_handler
parser.EndElementHandler = end_element_handler
parser.Parse(text, True)
if len(per_file_gain) != len(path_list):
raise ReplayGainError(
u'the number of results returned by bs1770gain does not match '
'the number of files passed to it')
# bs1770gain does not return the analysis results in the order that
# files are passed on the command line, because it is sorting the files
# internally. We must recover the order from the filenames themselves.
try: try:
song = { out = [per_file_gain[os.path.basename(p)] for p in path_list]
'file': part[0], except KeyError:
'gain': float((part[1].split('/'))[1].split('LU')[0]), raise ReplayGainError(
'peak': float(part[2].split('/')[1]), u'unrecognized filename in bs1770gain output '
} '(bs1770gain can only deal with utf-8 file names)')
except IndexError: if is_album:
self._log.info(u'bs1770gain reports (faulty file?): {}', parts) out.append(album_gain["album"])
continue
out.append(Gain(song['gain'], song['peak']))
return out return out
@ -256,7 +282,7 @@ class CommandBackend(Backend):
'noclip': True, 'noclip': True,
}) })
self.command = config["command"].get(unicode) self.command = config["command"].as_str()
if self.command: if self.command:
# Explicit executable path. # Explicit executable path.
@ -267,9 +293,9 @@ class CommandBackend(Backend):
) )
else: else:
# Check whether the program is in $PATH. # Check whether the program is in $PATH.
for cmd in (b'mp3gain', b'aacgain'): for cmd in ('mp3gain', 'aacgain'):
try: try:
call([cmd, b'-v']) call([cmd, '-v'])
self.command = cmd self.command = cmd
except OSError: except OSError:
pass pass
@ -286,7 +312,7 @@ class CommandBackend(Backend):
"""Computes the track gain of the given tracks, returns a list """Computes the track gain of the given tracks, returns a list
of TrackGain objects. of TrackGain objects.
""" """
supported_items = filter(self.format_supported, items) supported_items = list(filter(self.format_supported, items))
output = self.compute_gain(supported_items, False) output = self.compute_gain(supported_items, False)
return output return output
@ -297,7 +323,7 @@ class CommandBackend(Backend):
# TODO: What should be done when not all tracks in the album are # TODO: What should be done when not all tracks in the album are
# supported? # supported?
supported_items = filter(self.format_supported, album.items()) supported_items = list(filter(self.format_supported, album.items()))
if len(supported_items) != len(album.items()): if len(supported_items) != len(album.items()):
self._log.debug(u'tracks are of unsupported format') self._log.debug(u'tracks are of unsupported format')
return AlbumGain(None, []) return AlbumGain(None, [])
@ -334,14 +360,14 @@ class CommandBackend(Backend):
# tag-writing; this turns the mp3gain/aacgain tool into a gain # tag-writing; this turns the mp3gain/aacgain tool into a gain
# calculator rather than a tag manipulator because we take care # calculator rather than a tag manipulator because we take care
# of changing tags ourselves. # of changing tags ourselves.
cmd = [self.command, b'-o', b'-s', b's'] cmd = [self.command, '-o', '-s', 's']
if self.noclip: if self.noclip:
# Adjust to avoid clipping. # Adjust to avoid clipping.
cmd = cmd + [b'-k'] cmd = cmd + ['-k']
else: else:
# Disable clipping warning. # Disable clipping warning.
cmd = cmd + [b'-c'] cmd = cmd + ['-c']
cmd = cmd + [b'-d', bytes(self.gain_offset)] cmd = cmd + ['-d', str(self.gain_offset)]
cmd = cmd + [syspath(i.path) for i in items] cmd = cmd + [syspath(i.path) for i in items]
self._log.debug(u'analyzing {0} files', len(items)) self._log.debug(u'analyzing {0} files', len(items))
@ -574,7 +600,7 @@ class GStreamerBackend(Backend):
self._file = self._files.pop(0) self._file = self._files.pop(0)
self._pipe.set_state(self.Gst.State.NULL) self._pipe.set_state(self.Gst.State.NULL)
self._src.set_property("location", syspath(self._file.path)) self._src.set_property("location", py3_path(syspath(self._file.path)))
self._pipe.set_state(self.Gst.State.PLAYING) self._pipe.set_state(self.Gst.State.PLAYING)
return True return True
@ -587,16 +613,6 @@ class GStreamerBackend(Backend):
self._file = self._files.pop(0) self._file = self._files.pop(0)
# Disconnect the decodebin element from the pipeline, set its
# state to READY to to clear it.
self._decbin.unlink(self._conv)
self._decbin.set_state(self.Gst.State.READY)
# Set a new file on the filesrc element, can only be done in the
# READY state
self._src.set_state(self.Gst.State.READY)
self._src.set_property("location", syspath(self._file.path))
# Ensure the filesrc element received the paused state of the # Ensure the filesrc element received the paused state of the
# pipeline in a blocking manner # pipeline in a blocking manner
self._src.sync_state_with_parent() self._src.sync_state_with_parent()
@ -607,6 +623,19 @@ class GStreamerBackend(Backend):
self._decbin.sync_state_with_parent() self._decbin.sync_state_with_parent()
self._decbin.get_state(self.Gst.CLOCK_TIME_NONE) self._decbin.get_state(self.Gst.CLOCK_TIME_NONE)
# Disconnect the decodebin element from the pipeline, set its
# state to READY to to clear it.
self._decbin.unlink(self._conv)
self._decbin.set_state(self.Gst.State.READY)
# Set a new file on the filesrc element, can only be done in the
# READY state
self._src.set_state(self.Gst.State.READY)
self._src.set_property("location", py3_path(syspath(self._file.path)))
self._decbin.link(self._conv)
self._pipe.set_state(self.Gst.State.READY)
return True return True
def _set_next_file(self): def _set_next_file(self):
@ -794,7 +823,7 @@ class ReplayGainPlugin(BeetsPlugin):
"command": CommandBackend, "command": CommandBackend,
"gstreamer": GStreamerBackend, "gstreamer": GStreamerBackend,
"audiotools": AudioToolsBackend, "audiotools": AudioToolsBackend,
"bs1770gain": Bs1770gainBackend "bs1770gain": Bs1770gainBackend,
} }
def __init__(self): def __init__(self):
@ -806,10 +835,11 @@ class ReplayGainPlugin(BeetsPlugin):
'auto': True, 'auto': True,
'backend': u'command', 'backend': u'command',
'targetlevel': 89, 'targetlevel': 89,
'r128': ['Opus'],
}) })
self.overwrite = self.config['overwrite'].get(bool) self.overwrite = self.config['overwrite'].get(bool)
backend_name = self.config['backend'].get(unicode) backend_name = self.config['backend'].as_str()
if backend_name not in self.backends: if backend_name not in self.backends:
raise ui.UserError( raise ui.UserError(
u"Selected ReplayGain backend {0} is not supported. " u"Selected ReplayGain backend {0} is not supported. "
@ -823,6 +853,9 @@ class ReplayGainPlugin(BeetsPlugin):
if self.config['auto']: if self.config['auto']:
self.import_stages = [self.imported] self.import_stages = [self.imported]
# Formats to use R128.
self.r128_whitelist = self.config['r128'].as_str_seq()
try: try:
self.backend_instance = self.backends[backend_name]( self.backend_instance = self.backends[backend_name](
self.config, self._log self.config, self._log
@ -831,9 +864,19 @@ class ReplayGainPlugin(BeetsPlugin):
raise ui.UserError( raise ui.UserError(
u'replaygain initialization failed: {0}'.format(e)) u'replaygain initialization failed: {0}'.format(e))
self.r128_backend_instance = ''
def should_use_r128(self, item):
"""Checks the plugin setting to decide whether the calculation
should be done using the EBU R128 standard and use R128_ tags instead.
"""
return item.format in self.r128_whitelist
def track_requires_gain(self, item): def track_requires_gain(self, item):
return self.overwrite or \ return self.overwrite or \
(not item.rg_track_gain or not item.rg_track_peak) (self.should_use_r128(item) and not item.r128_track_gain) or \
(not self.should_use_r128(item) and
(not item.rg_track_gain or not item.rg_track_peak))
def album_requires_gain(self, album): def album_requires_gain(self, album):
# Skip calculating gain only when *all* files don't need # Skip calculating gain only when *all* files don't need
@ -841,7 +884,11 @@ class ReplayGainPlugin(BeetsPlugin):
# needs recalculation, we still get an accurate album gain # needs recalculation, we still get an accurate album gain
# value. # value.
return self.overwrite or \ return self.overwrite or \
any([not item.rg_album_gain or not item.rg_album_peak any([self.should_use_r128(item) and
(not item.r128_track_gain or not item.r128_album_gain)
for item in album.items()]) or \
any([not self.should_use_r128(item) and
(not item.rg_album_gain or not item.rg_album_peak)
for item in album.items()]) for item in album.items()])
def store_track_gain(self, item, track_gain): def store_track_gain(self, item, track_gain):
@ -852,6 +899,12 @@ class ReplayGainPlugin(BeetsPlugin):
self._log.debug(u'applied track gain {0}, peak {1}', self._log.debug(u'applied track gain {0}, peak {1}',
item.rg_track_gain, item.rg_track_peak) item.rg_track_gain, item.rg_track_peak)
def store_track_r128_gain(self, item, track_gain):
item.r128_track_gain = int(round(track_gain.gain * pow(2, 8)))
item.store()
self._log.debug(u'applied track gain {0}', item.r128_track_gain)
def store_album_gain(self, album, album_gain): def store_album_gain(self, album, album_gain):
album.rg_album_gain = album_gain.gain album.rg_album_gain = album_gain.gain
album.rg_album_peak = album_gain.peak album.rg_album_peak = album_gain.peak
@ -860,7 +913,13 @@ class ReplayGainPlugin(BeetsPlugin):
self._log.debug(u'applied album gain {0}, peak {1}', self._log.debug(u'applied album gain {0}, peak {1}',
album.rg_album_gain, album.rg_album_peak) album.rg_album_gain, album.rg_album_peak)
def handle_album(self, album, write): def store_album_r128_gain(self, album, album_gain):
album.r128_album_gain = int(round(album_gain.gain * pow(2, 8)))
album.store()
self._log.debug(u'applied album gain {0}', album.r128_album_gain)
def handle_album(self, album, write, force=False):
"""Compute album and track replay gain store it in all of the """Compute album and track replay gain store it in all of the
album's items. album's items.
@ -868,24 +927,41 @@ class ReplayGainPlugin(BeetsPlugin):
item. If replay gain information is already present in all item. If replay gain information is already present in all
items, nothing is done. items, nothing is done.
""" """
if not self.album_requires_gain(album): if not force and not self.album_requires_gain(album):
self._log.info(u'Skipping album {0}', album) self._log.info(u'Skipping album {0}', album)
return return
self._log.info(u'analyzing {0}', album) self._log.info(u'analyzing {0}', album)
if (any([self.should_use_r128(item) for item in album.items()]) and not
all(([self.should_use_r128(item) for item in album.items()]))):
raise ReplayGainError(
u"Mix of ReplayGain and EBU R128 detected"
u" for some tracks in album {0}".format(album)
)
if any([self.should_use_r128(item) for item in album.items()]):
if self.r128_backend_instance == '':
self.init_r128_backend()
backend_instance = self.r128_backend_instance
store_track_gain = self.store_track_r128_gain
store_album_gain = self.store_album_r128_gain
else:
backend_instance = self.backend_instance
store_track_gain = self.store_track_gain
store_album_gain = self.store_album_gain
try: try:
album_gain = self.backend_instance.compute_album_gain(album) album_gain = backend_instance.compute_album_gain(album)
if len(album_gain.track_gains) != len(album.items()): if len(album_gain.track_gains) != len(album.items()):
raise ReplayGainError( raise ReplayGainError(
u"ReplayGain backend failed " u"ReplayGain backend failed "
u"for some tracks in album {0}".format(album) u"for some tracks in album {0}".format(album)
) )
self.store_album_gain(album, album_gain.album_gain) store_album_gain(album, album_gain.album_gain)
for item, track_gain in itertools.izip(album.items(), for item, track_gain in zip(album.items(), album_gain.track_gains):
album_gain.track_gains): store_track_gain(item, track_gain)
self.store_track_gain(item, track_gain)
if write: if write:
item.try_write() item.try_write()
except ReplayGainError as e: except ReplayGainError as e:
@ -894,27 +970,36 @@ class ReplayGainPlugin(BeetsPlugin):
raise ui.UserError( raise ui.UserError(
u"Fatal replay gain error: {0}".format(e)) u"Fatal replay gain error: {0}".format(e))
def handle_track(self, item, write): def handle_track(self, item, write, force=False):
"""Compute track replay gain and store it in the item. """Compute track replay gain and store it in the item.
If ``write`` is truthy then ``item.write()`` is called to write If ``write`` is truthy then ``item.write()`` is called to write
the data to disk. If replay gain information is already present the data to disk. If replay gain information is already present
in the item, nothing is done. in the item, nothing is done.
""" """
if not self.track_requires_gain(item): if not force and not self.track_requires_gain(item):
self._log.info(u'Skipping track {0}', item) self._log.info(u'Skipping track {0}', item)
return return
self._log.info(u'analyzing {0}', item) self._log.info(u'analyzing {0}', item)
if self.should_use_r128(item):
if self.r128_backend_instance == '':
self.init_r128_backend()
backend_instance = self.r128_backend_instance
store_track_gain = self.store_track_r128_gain
else:
backend_instance = self.backend_instance
store_track_gain = self.store_track_gain
try: try:
track_gains = self.backend_instance.compute_track_gain([item]) track_gains = backend_instance.compute_track_gain([item])
if len(track_gains) != 1: if len(track_gains) != 1:
raise ReplayGainError( raise ReplayGainError(
u"ReplayGain backend failed for track {0}".format(item) u"ReplayGain backend failed for track {0}".format(item)
) )
self.store_track_gain(item, track_gains[0]) store_track_gain(item, track_gains[0])
if write: if write:
item.try_write() item.try_write()
except ReplayGainError as e: except ReplayGainError as e:
@ -923,6 +1008,19 @@ class ReplayGainPlugin(BeetsPlugin):
raise ui.UserError( raise ui.UserError(
u"Fatal replay gain error: {0}".format(e)) u"Fatal replay gain error: {0}".format(e))
def init_r128_backend(self):
backend_name = 'bs1770gain'
try:
self.r128_backend_instance = self.backends[backend_name](
self.config, self._log
)
except (ReplayGainError, FatalReplayGainError) as e:
raise ui.UserError(
u'replaygain initialization failed: {0}'.format(e))
self.r128_backend_instance.method = '--ebu'
def imported(self, session, task): def imported(self, session, task):
"""Add replay gain info to items or albums of ``task``. """Add replay gain info to items or albums of ``task``.
""" """
@ -935,19 +1033,28 @@ class ReplayGainPlugin(BeetsPlugin):
"""Return the "replaygain" ui subcommand. """Return the "replaygain" ui subcommand.
""" """
def func(lib, opts, args): def func(lib, opts, args):
self._log.setLevel(logging.INFO) write = ui.should_write(opts.write)
force = opts.force
write = ui.should_write()
if opts.album: if opts.album:
for album in lib.albums(ui.decargs(args)): for album in lib.albums(ui.decargs(args)):
self.handle_album(album, write) self.handle_album(album, write, force)
else: else:
for item in lib.items(ui.decargs(args)): for item in lib.items(ui.decargs(args)):
self.handle_track(item, write) self.handle_track(item, write, force)
cmd = ui.Subcommand('replaygain', help=u'analyze for ReplayGain') cmd = ui.Subcommand('replaygain', help=u'analyze for ReplayGain')
cmd.parser.add_album_option() cmd.parser.add_album_option()
cmd.parser.add_option(
"-f", "--force", dest="force", action="store_true", default=False,
help=u"analyze all files, including those that "
"already have ReplayGain metadata")
cmd.parser.add_option(
"-w", "--write", default=None, action="store_true",
help=u"write new metadata to files' tags")
cmd.parser.add_option(
"-W", "--nowrite", dest="write", action="store_false",
help=u"don't write metadata (opposite of -w)")
cmd.func = func cmd.func = func
return [cmd] return [cmd]

View file

@ -51,7 +51,7 @@ class RewritePlugin(BeetsPlugin):
# Gather all the rewrite rules for each field. # Gather all the rewrite rules for each field.
rules = defaultdict(list) rules = defaultdict(list)
for key, view in self.config.items(): for key, view in self.config.items():
value = view.get(unicode) value = view.as_str()
try: try:
fieldname, pattern = key.split(None, 1) fieldname, pattern = key.split(None, 1)
except ValueError: except ValueError:
@ -68,7 +68,7 @@ class RewritePlugin(BeetsPlugin):
rules['albumartist'].append((pattern, value)) rules['albumartist'].append((pattern, value))
# Replace each template field with the new rewriter function. # Replace each template field with the new rewriter function.
for fieldname, fieldrules in rules.iteritems(): for fieldname, fieldrules in rules.items():
getter = rewriter(fieldname, fieldrules) getter = rewriter(fieldname, fieldrules)
self.template_fields[fieldname] = getter self.template_fields[fieldname] = getter
if fieldname in library.Album._fields: if fieldname in library.Album._fields:

View file

@ -24,6 +24,7 @@ from beets import ui
from beets import util from beets import util
from beets import config from beets import config
from beets import mediafile from beets import mediafile
import mutagen
_MUTAGEN_FORMATS = { _MUTAGEN_FORMATS = {
'asf': 'ASF', 'asf': 'ASF',
@ -106,7 +107,7 @@ class ScrubPlugin(BeetsPlugin):
for tag in f.keys(): for tag in f.keys():
del f[tag] del f[tag]
f.save() f.save()
except IOError as exc: except (IOError, mutagen.MutagenError) as exc:
self._log.error(u'could not scrub {0}: {1}', self._log.error(u'could not scrub {0}: {1}',
util.displayable_path(path), exc) util.displayable_path(path), exc)
@ -119,10 +120,11 @@ class ScrubPlugin(BeetsPlugin):
try: try:
mf = mediafile.MediaFile(util.syspath(item.path), mf = mediafile.MediaFile(util.syspath(item.path),
config['id3v23'].get(bool)) config['id3v23'].get(bool))
except IOError as exc: except mediafile.UnreadableFileError as exc:
self._log.error(u'could not open file to scrub: {0}', self._log.error(u'could not open file to scrub: {0}',
exc) exc)
art = mf.art return
images = mf.images
# Remove all tags. # Remove all tags.
self._scrub(item.path) self._scrub(item.path)
@ -131,12 +133,15 @@ class ScrubPlugin(BeetsPlugin):
if restore: if restore:
self._log.debug(u'writing new tags after scrub') self._log.debug(u'writing new tags after scrub')
item.try_write() item.try_write()
if art: if images:
self._log.debug(u'restoring art') self._log.debug(u'restoring art')
try:
mf = mediafile.MediaFile(util.syspath(item.path), mf = mediafile.MediaFile(util.syspath(item.path),
config['id3v23'].get(bool)) config['id3v23'].get(bool))
mf.art = art mf.images = images
mf.save() mf.save()
except mediafile.UnreadableFileError as exc:
self._log.error(u'could not write tags: {0}', exc)
def import_task_files(self, session, task): def import_task_files(self, session, task):
"""Automatically scrub imported files.""" """Automatically scrub imported files."""

View file

@ -20,11 +20,13 @@ from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
from beets import ui from beets import ui
from beets.util import mkdirall, normpath, syspath from beets.util import (mkdirall, normpath, sanitize_path, syspath,
bytestring_path)
from beets.library import Item, Album, parse_query_string from beets.library import Item, Album, parse_query_string
from beets.dbcore import OrQuery from beets.dbcore import OrQuery
from beets.dbcore.query import MultipleSort, ParsingError from beets.dbcore.query import MultipleSort, ParsingError
import os import os
import six
class SmartPlaylistPlugin(BeetsPlugin): class SmartPlaylistPlugin(BeetsPlugin):
@ -97,7 +99,7 @@ class SmartPlaylistPlugin(BeetsPlugin):
for playlist in self.config['playlists'].get(list): for playlist in self.config['playlists'].get(list):
if 'name' not in playlist: if 'name' not in playlist:
self._log.warn(u"playlist configuration is missing name") self._log.warning(u"playlist configuration is missing name")
continue continue
playlist_data = (playlist['name'],) playlist_data = (playlist['name'],)
@ -106,7 +108,7 @@ class SmartPlaylistPlugin(BeetsPlugin):
qs = playlist.get(key) qs = playlist.get(key)
if qs is None: if qs is None:
query_and_sort = None, None query_and_sort = None, None
elif isinstance(qs, basestring): elif isinstance(qs, six.string_types):
query_and_sort = parse_query_string(qs, Model) query_and_sort = parse_query_string(qs, Model)
elif len(qs) == 1: elif len(qs) == 1:
query_and_sort = parse_query_string(qs[0], Model) query_and_sort = parse_query_string(qs[0], Model)
@ -133,7 +135,7 @@ class SmartPlaylistPlugin(BeetsPlugin):
playlist_data += (query_and_sort,) playlist_data += (query_and_sort,)
except ParsingError as exc: except ParsingError as exc:
self._log.warn(u"invalid query in playlist {}: {}", self._log.warning(u"invalid query in playlist {}: {}",
playlist['name'], exc) playlist['name'], exc)
continue continue
@ -165,10 +167,14 @@ class SmartPlaylistPlugin(BeetsPlugin):
len(self._matched_playlists)) len(self._matched_playlists))
playlist_dir = self.config['playlist_dir'].as_filename() playlist_dir = self.config['playlist_dir'].as_filename()
playlist_dir = bytestring_path(playlist_dir)
relative_to = self.config['relative_to'].get() relative_to = self.config['relative_to'].get()
if relative_to: if relative_to:
relative_to = normpath(relative_to) relative_to = normpath(relative_to)
# Maps playlist filenames to lists of track filenames.
m3us = {}
for playlist in self._matched_playlists: for playlist in self._matched_playlists:
name, (query, q_sort), (album_query, a_q_sort) = playlist name, (query, q_sort), (album_query, a_q_sort) = playlist
self._log.debug(u"Creating playlist {0}", name) self._log.debug(u"Creating playlist {0}", name)
@ -180,11 +186,11 @@ class SmartPlaylistPlugin(BeetsPlugin):
for album in lib.albums(album_query, a_q_sort): for album in lib.albums(album_query, a_q_sort):
items.extend(album.items()) items.extend(album.items())
m3us = {}
# As we allow tags in the m3u names, we'll need to iterate through # As we allow tags in the m3u names, we'll need to iterate through
# the items and generate the correct m3u file names. # the items and generate the correct m3u file names.
for item in items: for item in items:
m3u_name = item.evaluate_template(name, True) m3u_name = item.evaluate_template(name, True)
m3u_name = sanitize_path(m3u_name, lib.replacements)
if m3u_name not in m3us: if m3u_name not in m3us:
m3us[m3u_name] = [] m3us[m3u_name] = []
item_path = item.path item_path = item.path
@ -192,11 +198,14 @@ class SmartPlaylistPlugin(BeetsPlugin):
item_path = os.path.relpath(item.path, relative_to) item_path = os.path.relpath(item.path, relative_to)
if item_path not in m3us[m3u_name]: if item_path not in m3us[m3u_name]:
m3us[m3u_name].append(item_path) m3us[m3u_name].append(item_path)
# Now iterate through the m3us that we need to generate
# Write all of the accumulated track lists to files.
for m3u in m3us: for m3u in m3us:
m3u_path = normpath(os.path.join(playlist_dir, m3u)) m3u_path = normpath(os.path.join(playlist_dir,
bytestring_path(m3u)))
mkdirall(m3u_path) mkdirall(m3u_path)
with open(syspath(m3u_path), 'w') as f: with open(syspath(m3u_path), 'wb') as f:
for path in m3us[m3u]: for path in m3us[m3u]:
f.write(path + b'\n') f.write(path + b'\n')
self._log.info(u"{0} playlists updated", len(self._matched_playlists)) self._log.info(u"{0} playlists updated", len(self._matched_playlists))

View file

@ -0,0 +1,48 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2018, Tobias Sauerwein.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates a Sonos library whenever the beets library is changed.
This is based on the Kodi Update plugin.
"""
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
import soco
class SonosUpdate(BeetsPlugin):
def __init__(self):
super(SonosUpdate, self).__init__()
self.register_listener('database_change', self.listen_for_db_change)
def listen_for_db_change(self, lib, model):
"""Listens for beets db change and register the update"""
self.register_listener('cli_exit', self.update)
def update(self, lib):
"""When the client exists try to send refresh request to a Sonos
controler.
"""
self._log.info(u'Requesting a Sonos library update...')
device = soco.discovery.any_soco()
if device:
device.music_library.start_library_update()
else:
self._log.warning(u'Could not find a Sonos device.')
return
self._log.info(u'Sonos update triggered')

View file

@ -63,7 +63,7 @@ class SpotifyPlugin(BeetsPlugin):
self.config['show_failures'].set(True) self.config['show_failures'].set(True)
if self.config['mode'].get() not in ['list', 'open']: if self.config['mode'].get() not in ['list', 'open']:
self._log.warn(u'{0} is not a valid mode', self._log.warning(u'{0} is not a valid mode',
self.config['mode'].get()) self.config['mode'].get())
return False return False
@ -124,9 +124,8 @@ class SpotifyPlugin(BeetsPlugin):
# Apply market filter if requested # Apply market filter if requested
region_filter = self.config['region_filter'].get() region_filter = self.config['region_filter'].get()
if region_filter: if region_filter:
r_data = filter( r_data = [x for x in r_data if region_filter
lambda x: region_filter in x['available_markets'], r_data in x['available_markets']]
)
# Simplest, take the first result # Simplest, take the first result
chosen_result = None chosen_result = None
@ -155,7 +154,7 @@ class SpotifyPlugin(BeetsPlugin):
self._log.info(u'track: {0}', track) self._log.info(u'track: {0}', track)
self._log.info(u'') self._log.info(u'')
else: else:
self._log.warn(u'{0} track(s) did not match a Spotify ID;\n' self._log.warning(u'{0} track(s) did not match a Spotify ID;\n'
u'use --show-failures to display', u'use --show-failures to display',
failure_count) failure_count)
@ -163,7 +162,7 @@ class SpotifyPlugin(BeetsPlugin):
def output_results(self, results): def output_results(self, results):
if results: if results:
ids = map(lambda x: x['id'], results) ids = [x['id'] for x in results]
if self.config['mode'].get() == "open": if self.config['mode'].get() == "open":
self._log.info(u'Attempting to open Spotify with playlist') self._log.info(u'Attempting to open Spotify with playlist')
spotify_url = self.playlist_partial + ",".join(ids) spotify_url = self.playlist_partial + ",".join(ids)
@ -171,6 +170,6 @@ class SpotifyPlugin(BeetsPlugin):
else: else:
for item in ids: for item in ids:
print(unicode.encode(self.open_url + item)) print(self.open_url + item)
else: else:
self._log.warn(u'No Spotify tracks found from beets query') self._log.warning(u'No Spotify tracks found from beets query')

View file

@ -54,14 +54,14 @@ class ThePlugin(BeetsPlugin):
self._log.error(u'invalid pattern: {0}', p) self._log.error(u'invalid pattern: {0}', p)
else: else:
if not (p.startswith('^') or p.endswith('$')): if not (p.startswith('^') or p.endswith('$')):
self._log.warn(u'warning: \"{0}\" will not ' self._log.warning(u'warning: \"{0}\" will not '
u'match string start/end', p) u'match string start/end', p)
if self.config['a']: if self.config['a']:
self.patterns = [PATTERN_A] + self.patterns self.patterns = [PATTERN_A] + self.patterns
if self.config['the']: if self.config['the']:
self.patterns = [PATTERN_THE] + self.patterns self.patterns = [PATTERN_THE] + self.patterns
if not self.patterns: if not self.patterns:
self._log.warn(u'no patterns defined!') self._log.warning(u'no patterns defined!')
def unthe(self, text, pattern): def unthe(self, text, pattern):
"""Moves pattern in the path format string or strips it """Moves pattern in the path format string or strips it
@ -81,7 +81,7 @@ class ThePlugin(BeetsPlugin):
if self.config['strip']: if self.config['strip']:
return r return r
else: else:
fmt = self.config['format'].get(unicode) fmt = self.config['format'].as_str()
return fmt.format(r, t.strip()).strip() return fmt.format(r, t.strip()).strip()
else: else:
return u'' return u''

View file

@ -35,6 +35,7 @@ from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, decargs from beets.ui import Subcommand, decargs
from beets import util from beets import util
from beets.util.artresizer import ArtResizer, get_im_version, get_pil_version from beets.util.artresizer import ArtResizer, get_im_version, get_pil_version
import six
BASE_DIR = os.path.join(BaseDirectory.xdg_cache_home, "thumbnails") BASE_DIR = os.path.join(BaseDirectory.xdg_cache_home, "thumbnails")
@ -162,15 +163,16 @@ class ThumbnailsPlugin(BeetsPlugin):
See http://standards.freedesktop.org/thumbnail-spec/latest/x227.html See http://standards.freedesktop.org/thumbnail-spec/latest/x227.html
""" """
uri = self.get_uri(path) uri = self.get_uri(path)
hash = md5(uri).hexdigest() hash = md5(uri.encode('utf-8')).hexdigest()
return b"{0}.png".format(hash) return util.bytestring_path("{0}.png".format(hash))
def add_tags(self, album, image_path): def add_tags(self, album, image_path):
"""Write required metadata to the thumbnail """Write required metadata to the thumbnail
See http://standards.freedesktop.org/thumbnail-spec/latest/x142.html See http://standards.freedesktop.org/thumbnail-spec/latest/x142.html
""" """
mtime = os.stat(album.artpath).st_mtime
metadata = {"Thumb::URI": self.get_uri(album.artpath), metadata = {"Thumb::URI": self.get_uri(album.artpath),
"Thumb::MTime": unicode(os.stat(album.artpath).st_mtime)} "Thumb::MTime": six.text_type(mtime)}
try: try:
self.write_metadata(image_path, metadata) self.write_metadata(image_path, metadata)
except Exception: except Exception:
@ -183,7 +185,8 @@ class ThumbnailsPlugin(BeetsPlugin):
return return
artfile = os.path.split(album.artpath)[1] artfile = os.path.split(album.artpath)[1]
with open(outfilename, 'w') as f: with open(outfilename, 'w') as f:
f.write(b"[Desktop Entry]\nIcon=./{0}".format(artfile)) f.write('[Desktop Entry]\n')
f.write('Icon=./{0}'.format(artfile.decode('utf-8')))
f.close() f.close()
self._log.debug(u"Wrote file {0}", util.displayable_path(outfilename)) self._log.debug(u"Wrote file {0}", util.displayable_path(outfilename))
@ -232,7 +235,7 @@ def copy_c_string(c_string):
# work. A more surefire way would be to allocate a ctypes buffer and copy # work. A more surefire way would be to allocate a ctypes buffer and copy
# the data with `memcpy` or somesuch. # the data with `memcpy` or somesuch.
s = ctypes.cast(c_string, ctypes.c_char_p).value s = ctypes.cast(c_string, ctypes.c_char_p).value
return '' + s return b'' + s
class GioURI(URIGetter): class GioURI(URIGetter):
@ -271,8 +274,6 @@ class GioURI(URIGetter):
try: try:
uri_ptr = self.libgio.g_file_get_uri(g_file_ptr) uri_ptr = self.libgio.g_file_get_uri(g_file_ptr)
except:
raise
finally: finally:
self.libgio.g_object_unref(g_file_ptr) self.libgio.g_object_unref(g_file_ptr)
if not uri_ptr: if not uri_ptr:
@ -282,8 +283,12 @@ class GioURI(URIGetter):
try: try:
uri = copy_c_string(uri_ptr) uri = copy_c_string(uri_ptr)
except:
raise
finally: finally:
self.libgio.g_free(uri_ptr) self.libgio.g_free(uri_ptr)
return uri
try:
return uri.decode(util._fsencoding())
except UnicodeDecodeError:
raise RuntimeError(
"Could not decode filename from GIO: {!r}".format(uri)
)

View file

@ -24,7 +24,9 @@ import flask
from flask import g from flask import g
from werkzeug.routing import BaseConverter, PathConverter from werkzeug.routing import BaseConverter, PathConverter
import os import os
from unidecode import unidecode
import json import json
import base64
# Utilities. # Utilities.
@ -37,8 +39,16 @@ def _rep(obj, expand=False):
out = dict(obj) out = dict(obj)
if isinstance(obj, beets.library.Item): if isinstance(obj, beets.library.Item):
if app.config.get('INCLUDE_PATHS', False):
out['path'] = util.displayable_path(out['path'])
else:
del out['path'] del out['path']
# Filter all bytes attributes and convert them to strings.
for key, value in out.items():
if isinstance(out[key], bytes):
out[key] = base64.b64encode(value).decode('ascii')
# Get the size (in bytes) of the backing file. This is useful # Get the size (in bytes) of the backing file. This is useful
# for the Tomahawk resolver API. # for the Tomahawk resolver API.
try: try:
@ -55,11 +65,13 @@ def _rep(obj, expand=False):
return out return out
def json_generator(items, root): def json_generator(items, root, expand=False):
"""Generator that dumps list of beets Items or Albums as JSON """Generator that dumps list of beets Items or Albums as JSON
:param root: root key for JSON :param root: root key for JSON
:param items: list of :class:`Item` or :class:`Album` to dump :param items: list of :class:`Item` or :class:`Album` to dump
:param expand: If true every :class:`Album` contains its items in the json
representation
:returns: generator that yields strings :returns: generator that yields strings
""" """
yield '{"%s":[' % root yield '{"%s":[' % root
@ -69,10 +81,16 @@ def json_generator(items, root):
first = False first = False
else: else:
yield ',' yield ','
yield json.dumps(_rep(item)) yield json.dumps(_rep(item, expand=expand))
yield ']}' yield ']}'
def is_expand():
"""Returns whether the current request is for an expanded response."""
return flask.request.args.get('expand') is not None
def resource(name): def resource(name):
"""Decorates a function to handle RESTful HTTP requests for a resource. """Decorates a function to handle RESTful HTTP requests for a resource.
""" """
@ -82,7 +100,7 @@ def resource(name):
entities = [entity for entity in entities if entity] entities = [entity for entity in entities if entity]
if len(entities) == 1: if len(entities) == 1:
return flask.jsonify(_rep(entities[0])) return flask.jsonify(_rep(entities[0], expand=is_expand()))
elif entities: elif entities:
return app.response_class( return app.response_class(
json_generator(entities, root=name), json_generator(entities, root=name),
@ -101,7 +119,10 @@ def resource_query(name):
def make_responder(query_func): def make_responder(query_func):
def responder(queries): def responder(queries):
return app.response_class( return app.response_class(
json_generator(query_func(queries), root='results'), json_generator(
query_func(queries),
root='results', expand=is_expand()
),
mimetype='application/json' mimetype='application/json'
) )
responder.__name__ = 'query_{0}'.format(name) responder.__name__ = 'query_{0}'.format(name)
@ -116,7 +137,7 @@ def resource_list(name):
def make_responder(list_all): def make_responder(list_all):
def responder(): def responder():
return app.response_class( return app.response_class(
json_generator(list_all(), root=name), json_generator(list_all(), root=name, expand=is_expand()),
mimetype='application/json' mimetype='application/json'
) )
responder.__name__ = 'all_{0}'.format(name) responder.__name__ = 'all_{0}'.format(name)
@ -162,11 +183,16 @@ class QueryConverter(PathConverter):
return ','.join(value) return ','.join(value)
class EverythingConverter(PathConverter):
regex = '.*?'
# Flask setup. # Flask setup.
app = flask.Flask(__name__) app = flask.Flask(__name__)
app.url_map.converters['idlist'] = IdListConverter app.url_map.converters['idlist'] = IdListConverter
app.url_map.converters['query'] = QueryConverter app.url_map.converters['query'] = QueryConverter
app.url_map.converters['everything'] = EverythingConverter
@app.before_request @app.before_request
@ -192,9 +218,34 @@ def all_items():
@app.route('/item/<int:item_id>/file') @app.route('/item/<int:item_id>/file')
def item_file(item_id): def item_file(item_id):
item = g.lib.get_item(item_id) item = g.lib.get_item(item_id)
response = flask.send_file(item.path, as_attachment=True,
attachment_filename=os.path.basename(item.path)) # On Windows under Python 2, Flask wants a Unicode path. On Python 3, it
response.headers['Content-Length'] = os.path.getsize(item.path) # *always* wants a Unicode path.
if os.name == 'nt':
item_path = util.syspath(item.path)
else:
item_path = util.py3_path(item.path)
try:
unicode_item_path = util.text_string(item.path)
except (UnicodeDecodeError, UnicodeEncodeError):
unicode_item_path = util.displayable_path(item.path)
base_filename = os.path.basename(unicode_item_path)
try:
# Imitate http.server behaviour
base_filename.encode("latin-1", "strict")
except UnicodeEncodeError:
safe_filename = unidecode(base_filename)
else:
safe_filename = base_filename
response = flask.send_file(
item_path,
as_attachment=True,
attachment_filename=safe_filename
)
response.headers['Content-Length'] = os.path.getsize(item_path)
return response return response
@ -204,6 +255,16 @@ def item_query(queries):
return g.lib.items(queries) return g.lib.items(queries)
@app.route('/item/path/<everything:path>')
def item_at_path(path):
query = beets.library.PathQuery('path', path.encode('utf-8'))
item = g.lib.items(query).get()
if item:
return flask.jsonify(_rep(item))
else:
return flask.abort(404)
@app.route('/item/values/<string:key>') @app.route('/item/values/<string:key>')
def item_unique_field_values(key): def item_unique_field_values(key):
sort_key = flask.request.args.get('sort_key', key) sort_key = flask.request.args.get('sort_key', key)
@ -239,8 +300,8 @@ def album_query(queries):
@app.route('/album/<int:album_id>/art') @app.route('/album/<int:album_id>/art')
def album_art(album_id): def album_art(album_id):
album = g.lib.get_album(album_id) album = g.lib.get_album(album_id)
if album.artpath: if album and album.artpath:
return flask.send_file(album.artpath) return flask.send_file(album.artpath.decode())
else: else:
return flask.abort(404) return flask.abort(404)
@ -295,6 +356,9 @@ class WebPlugin(BeetsPlugin):
'host': u'127.0.0.1', 'host': u'127.0.0.1',
'port': 8337, 'port': 8337,
'cors': '', 'cors': '',
'cors_supports_credentials': False,
'reverse_proxy': False,
'include_paths': False,
}) })
def commands(self): def commands(self):
@ -310,6 +374,11 @@ class WebPlugin(BeetsPlugin):
self.config['port'] = int(args.pop(0)) self.config['port'] = int(args.pop(0))
app.config['lib'] = lib app.config['lib'] = lib
# Normalizes json output
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
app.config['INCLUDE_PATHS'] = self.config['include_paths']
# Enable CORS if required. # Enable CORS if required.
if self.config['cors']: if self.config['cors']:
self._log.info(u'Enabling CORS with origin: {0}', self._log.info(u'Enabling CORS with origin: {0}',
@ -319,10 +388,56 @@ class WebPlugin(BeetsPlugin):
app.config['CORS_RESOURCES'] = { app.config['CORS_RESOURCES'] = {
r"/*": {"origins": self.config['cors'].get(str)} r"/*": {"origins": self.config['cors'].get(str)}
} }
CORS(app) CORS(
app,
supports_credentials=self.config[
'cors_supports_credentials'
].get(bool)
)
# Allow serving behind a reverse proxy
if self.config['reverse_proxy']:
app.wsgi_app = ReverseProxied(app.wsgi_app)
# Start the web application. # Start the web application.
app.run(host=self.config['host'].get(unicode), app.run(host=self.config['host'].as_str(),
port=self.config['port'].get(int), port=self.config['port'].get(int),
debug=opts.debug, threaded=True) debug=opts.debug, threaded=True)
cmd.func = func cmd.func = func
return [cmd] return [cmd]
class ReverseProxied(object):
'''Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
From: http://flask.pocoo.org/snippets/35/
:param app: the WSGI application
'''
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)

View file

@ -4,7 +4,7 @@ var timeFormat = function(secs) {
return '0:00'; return '0:00';
} }
secs = Math.round(secs); secs = Math.round(secs);
var mins = '' + Math.round(secs / 60); var mins = '' + Math.floor(secs / 60);
secs = '' + (secs % 60); secs = '' + (secs % 60);
if (secs.length < 2) { if (secs.length < 2) {
secs = '0' + secs; secs = '0' + secs;
@ -147,7 +147,7 @@ var BeetsRouter = Backbone.Router.extend({
}, },
itemQuery: function(query) { itemQuery: function(query) {
var queryURL = query.split(/\s+/).map(encodeURIComponent).join('/'); var queryURL = query.split(/\s+/).map(encodeURIComponent).join('/');
$.getJSON('/item/query/' + queryURL, function(data) { $.getJSON('item/query/' + queryURL, function(data) {
var models = _.map( var models = _.map(
data['results'], data['results'],
function(d) { return new Item(d); } function(d) { return new Item(d); }
@ -161,7 +161,7 @@ var router = new BeetsRouter();
// Model. // Model.
var Item = Backbone.Model.extend({ var Item = Backbone.Model.extend({
urlRoot: '/item' urlRoot: 'item'
}); });
var Items = Backbone.Collection.extend({ var Items = Backbone.Collection.extend({
model: Item model: Item
@ -264,7 +264,7 @@ var AppView = Backbone.View.extend({
$('#extra-detail').empty().append(extraDetailView.render().el); $('#extra-detail').empty().append(extraDetailView.render().el);
}, },
playItem: function(item) { playItem: function(item) {
var url = '/item/' + item.get('id') + '/file'; var url = 'item/' + item.get('id') + '/file';
$('#player audio').attr('src', url); $('#player audio').attr('src', url);
$('#player audio').get(0).play(); $('#player audio').get(0).play();

View file

@ -82,7 +82,7 @@
<% } %> <% } %>
<dt>File</dt> <dt>File</dt>
<dd> <dd>
<a target="_blank" class="download" href="/item/<%= id %>/file">download</a> <a target="_blank" class="download" href="item/<%= id %>/file">download</a>
</dd> </dd>
<% if (lyrics) { %> <% if (lyrics) { %>
<dt>Lyrics</dt> <dt>Lyrics</dt>

View file

@ -16,125 +16,148 @@
""" Clears tag fields in media files.""" """ Clears tag fields in media files."""
from __future__ import division, absolute_import, print_function from __future__ import division, absolute_import, print_function
import six
import re import re
from beets.plugins import BeetsPlugin from beets.plugins import BeetsPlugin
from beets.mediafile import MediaFile from beets.mediafile import MediaFile
from beets.importer import action from beets.importer import action
from beets.ui import Subcommand, decargs, input_yn
from beets.util import confit from beets.util import confit
__author__ = 'baobab@heresiarch.info' __author__ = 'baobab@heresiarch.info'
__version__ = '0.10'
class ZeroPlugin(BeetsPlugin): class ZeroPlugin(BeetsPlugin):
_instance = None
def __init__(self): def __init__(self):
super(ZeroPlugin, self).__init__() super(ZeroPlugin, self).__init__()
# Listeners.
self.register_listener('write', self.write_event) self.register_listener('write', self.write_event)
self.register_listener('import_task_choice', self.register_listener('import_task_choice',
self.import_task_choice_event) self.import_task_choice_event)
self.config.add({ self.config.add({
'auto': True,
'fields': [], 'fields': [],
'keep_fields': [], 'keep_fields': [],
'update_database': False, 'update_database': False,
}) })
self.patterns = {} self.fields_to_progs = {}
self.warned = False self.warned = False
# We'll only handle `fields` or `keep_fields`, but not both. """Read the bulk of the config into `self.fields_to_progs`.
After construction, `fields_to_progs` contains all the fields that
should be zeroed as keys and maps each of those to a list of compiled
regexes (progs) as values.
A field is zeroed if its value matches one of the associated progs. If
progs is empty, then the associated field is always zeroed.
"""
if self.config['fields'] and self.config['keep_fields']: if self.config['fields'] and self.config['keep_fields']:
self._log.warn(u'cannot blacklist and whitelist at the same time') self._log.warning(
u'cannot blacklist and whitelist at the same time'
)
# Blacklist mode. # Blacklist mode.
if self.config['fields']: elif self.config['fields']:
self.validate_config('fields')
for field in self.config['fields'].as_str_seq(): for field in self.config['fields'].as_str_seq():
self.set_pattern(field) self._set_pattern(field)
# Whitelist mode. # Whitelist mode.
elif self.config['keep_fields']: elif self.config['keep_fields']:
self.validate_config('keep_fields')
for field in MediaFile.fields(): for field in MediaFile.fields():
if field in self.config['keep_fields'].as_str_seq(): if (field not in self.config['keep_fields'].as_str_seq() and
continue
self.set_pattern(field)
# These fields should always be preserved. # These fields should always be preserved.
for key in ('id', 'path', 'album_id'): field not in ('id', 'path', 'album_id')):
if key in self.patterns: self._set_pattern(field)
del self.patterns[key]
def validate_config(self, mode): def commands(self):
"""Check whether fields in the configuration are valid. zero_command = Subcommand('zero', help='set fields to null')
`mode` should either be "fields" or "keep_fields", indicating def zero_fields(lib, opts, args):
the section of the configuration to validate. if not decargs(args) and not input_yn(
u"Remove fields for all items? (Y/n)",
True):
return
for item in lib.items(decargs(args)):
self.process_item(item)
zero_command.func = zero_fields
return [zero_command]
def _set_pattern(self, field):
"""Populate `self.fields_to_progs` for a given field.
Do some sanity checks then compile the regexes.
""" """
for field in self.config[mode].as_str_seq():
if field not in MediaFile.fields(): if field not in MediaFile.fields():
self._log.error(u'invalid field: {0}', field) self._log.error(u'invalid field: {0}', field)
continue elif field in ('id', 'path', 'album_id'):
if mode == 'fields' and field in ('id', 'path', 'album_id'): self._log.warning(u'field \'{0}\' ignored, zeroing '
self._log.warn(u'field \'{0}\' ignored, zeroing '
u'it would be dangerous', field) u'it would be dangerous', field)
continue else:
def set_pattern(self, field):
"""Set a field in `self.patterns` to a string list corresponding to
the configuration, or `True` if the field has no specific
configuration.
"""
try: try:
self.patterns[field] = self.config[field].as_str_seq() for pattern in self.config[field].as_str_seq():
prog = re.compile(pattern, re.IGNORECASE)
self.fields_to_progs.setdefault(field, []).append(prog)
except confit.NotFoundError: except confit.NotFoundError:
# Matches everything # Matches everything
self.patterns[field] = True self.fields_to_progs[field] = []
def import_task_choice_event(self, session, task): def import_task_choice_event(self, session, task):
"""Listen for import_task_choice event."""
if task.choice_flag == action.ASIS and not self.warned: if task.choice_flag == action.ASIS and not self.warned:
self._log.warn(u'cannot zero in \"as-is\" mode') self._log.warning(u'cannot zero in \"as-is\" mode')
self.warned = True self.warned = True
# TODO request write in as-is mode # TODO request write in as-is mode
@classmethod def write_event(self, item, path, tags):
def match_patterns(cls, field, patterns): if self.config['auto']:
"""Check if field (as string) is matching any of the patterns in self.set_fields(item, tags)
the list.
def set_fields(self, item, tags):
"""Set values in `tags` to `None` if the field is in
`self.fields_to_progs` and any of the corresponding `progs` matches the
field value.
Also update the `item` itself if `update_database` is set in the
config.
""" """
if patterns is True: fields_set = False
return True
for p in patterns: if not self.fields_to_progs:
if re.search(p, unicode(field), flags=re.IGNORECASE): self._log.warning(u'no fields, nothing to do')
return True
return False return False
def write_event(self, item, path, tags): for field, progs in self.fields_to_progs.items():
"""Set values in tags to `None` if the key and value are matched
by `self.patterns`.
"""
if not self.patterns:
self._log.warn(u'no fields, nothing to do')
return
for field, patterns in self.patterns.items():
if field in tags: if field in tags:
value = tags[field] value = tags[field]
match = self.match_patterns(tags[field], patterns) match = _match_progs(tags[field], progs)
else: else:
value = '' value = ''
match = patterns is True match = not progs
if match: if match:
fields_set = True
self._log.debug(u'{0}: {1} -> None', field, value) self._log.debug(u'{0}: {1} -> None', field, value)
tags[field] = None tags[field] = None
if self.config['update_database']: if self.config['update_database']:
item[field] = None item[field] = None
return fields_set
def process_item(self, item):
tags = dict(item)
if self.set_fields(item, tags):
item.write(tags=tags)
if self.config['update_database']:
item.store(fields=tags)
def _match_progs(value, progs):
"""Check if `value` (as string) is matching any of the compiled regexes in
the `progs` list.
"""
if not progs:
return True
for prog in progs:
if prog.search(six.text_type(value)):
return True
return False

BIN
libs/bin/beet.exe Normal file

Binary file not shown.

16
libs/bin/mid3cp Normal file
View file

@ -0,0 +1,16 @@
#!h:\src\env\nzbtomedia\scripts\python.exe
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys
from mutagen._tools.mid3cp import entry_point
if __name__ == "__main__":
sys.exit(entry_point())

16
libs/bin/mid3iconv Normal file
View file

@ -0,0 +1,16 @@
#!h:\src\env\nzbtomedia\scripts\python.exe
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys
from mutagen._tools.mid3iconv import entry_point
if __name__ == "__main__":
sys.exit(entry_point())

16
libs/bin/mid3v2 Normal file
View file

@ -0,0 +1,16 @@
#!h:\src\env\nzbtomedia\scripts\python.exe
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys
from mutagen._tools.mid3v2 import entry_point
if __name__ == "__main__":
sys.exit(entry_point())

16
libs/bin/moggsplit Normal file
View file

@ -0,0 +1,16 @@
#!h:\src\env\nzbtomedia\scripts\python.exe
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys
from mutagen._tools.moggsplit import entry_point
if __name__ == "__main__":
sys.exit(entry_point())

16
libs/bin/mutagen-inspect Normal file
View file

@ -0,0 +1,16 @@
#!h:\src\env\nzbtomedia\scripts\python.exe
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys
from mutagen._tools.mutagen_inspect import entry_point
if __name__ == "__main__":
sys.exit(entry_point())

16
libs/bin/mutagen-pony Normal file
View file

@ -0,0 +1,16 @@
#!h:\src\env\nzbtomedia\scripts\python.exe
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys
from mutagen._tools.mutagen_pony import entry_point
if __name__ == "__main__":
sys.exit(entry_point())

BIN
libs/bin/unidecode.exe Normal file

Binary file not shown.

View file

@ -3,5 +3,4 @@ from .initialise import init, deinit, reinit, colorama_text
from .ansi import Fore, Back, Style, Cursor from .ansi import Fore, Back, Style, Cursor
from .ansitowin32 import AnsiToWin32 from .ansitowin32 import AnsiToWin32
__version__ = '0.3.7' __version__ = '0.4.1'

View file

@ -13,14 +13,6 @@ if windll is not None:
winterm = WinTerm() winterm = WinTerm()
def is_stream_closed(stream):
return not hasattr(stream, 'closed') or stream.closed
def is_a_tty(stream):
return hasattr(stream, 'isatty') and stream.isatty()
class StreamWrapper(object): class StreamWrapper(object):
''' '''
Wraps a stream (such as stdout), acting as a transparent proxy for all Wraps a stream (such as stdout), acting as a transparent proxy for all
@ -36,9 +28,38 @@ class StreamWrapper(object):
def __getattr__(self, name): def __getattr__(self, name):
return getattr(self.__wrapped, name) return getattr(self.__wrapped, name)
def __enter__(self, *args, **kwargs):
# special method lookup bypasses __getattr__/__getattribute__, see
# https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit
# thus, contextlib magic methods are not proxied via __getattr__
return self.__wrapped.__enter__(*args, **kwargs)
def __exit__(self, *args, **kwargs):
return self.__wrapped.__exit__(*args, **kwargs)
def write(self, text): def write(self, text):
self.__convertor.write(text) self.__convertor.write(text)
def isatty(self):
stream = self.__wrapped
if 'PYCHARM_HOSTED' in os.environ:
if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__):
return True
try:
stream_isatty = stream.isatty
except AttributeError:
return False
else:
return stream_isatty()
@property
def closed(self):
stream = self.__wrapped
try:
return stream.closed
except AttributeError:
return True
class AnsiToWin32(object): class AnsiToWin32(object):
''' '''
@ -46,8 +67,8 @@ class AnsiToWin32(object):
sequences from the text, and if outputting to a tty, will convert them into sequences from the text, and if outputting to a tty, will convert them into
win32 function calls. win32 function calls.
''' '''
ANSI_CSI_RE = re.compile('\001?\033\[((?:\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
ANSI_OSC_RE = re.compile('\001?\033\]((?:.|;)*?)(\x07)\002?') # Operating System Command ANSI_OSC_RE = re.compile('\001?\033\\]((?:.|;)*?)(\x07)\002?') # Operating System Command
def __init__(self, wrapped, convert=None, strip=None, autoreset=False): def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr) # The wrapped stream (normally sys.stdout or sys.stderr)
@ -68,12 +89,12 @@ class AnsiToWin32(object):
# should we strip ANSI sequences from our output? # should we strip ANSI sequences from our output?
if strip is None: if strip is None:
strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped)) strip = conversion_supported or (not self.stream.closed and not self.stream.isatty())
self.strip = strip self.strip = strip
# should we should convert ANSI sequences into win32 calls? # should we should convert ANSI sequences into win32 calls?
if convert is None: if convert is None:
convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped) convert = conversion_supported and not self.stream.closed and self.stream.isatty()
self.convert = convert self.convert = convert
# dict of ansi codes to win32 functions and parameters # dict of ansi codes to win32 functions and parameters
@ -149,7 +170,7 @@ class AnsiToWin32(object):
def reset_all(self): def reset_all(self):
if self.convert: if self.convert:
self.call_win32('m', (0,)) self.call_win32('m', (0,))
elif not self.strip and not is_stream_closed(self.wrapped): elif not self.strip and not self.stream.closed:
self.wrapped.write(Style.RESET_ALL) self.wrapped.write(Style.RESET_ALL)

View file

@ -78,5 +78,3 @@ def wrap_stream(stream, convert, strip, autoreset, wrap):
if wrapper.should_wrap(): if wrapper.should_wrap():
stream = wrapper.stream stream = wrapper.stream
return stream return stream

View file

@ -83,33 +83,31 @@ else:
] ]
_FillConsoleOutputAttribute.restype = wintypes.BOOL _FillConsoleOutputAttribute.restype = wintypes.BOOL
_SetConsoleTitleW = windll.kernel32.SetConsoleTitleA _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
_SetConsoleTitleW.argtypes = [ _SetConsoleTitleW.argtypes = [
wintypes.LPCSTR wintypes.LPCWSTR
] ]
_SetConsoleTitleW.restype = wintypes.BOOL _SetConsoleTitleW.restype = wintypes.BOOL
handles = { def _winapi_test(handle):
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
def winapi_test():
handle = handles[STDOUT]
csbi = CONSOLE_SCREEN_BUFFER_INFO() csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo( success = _GetConsoleScreenBufferInfo(
handle, byref(csbi)) handle, byref(csbi))
return bool(success) return bool(success)
def winapi_test():
return any(_winapi_test(h) for h in
(_GetStdHandle(STDOUT), _GetStdHandle(STDERR)))
def GetConsoleScreenBufferInfo(stream_id=STDOUT): def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id] handle = _GetStdHandle(stream_id)
csbi = CONSOLE_SCREEN_BUFFER_INFO() csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo( success = _GetConsoleScreenBufferInfo(
handle, byref(csbi)) handle, byref(csbi))
return csbi return csbi
def SetConsoleTextAttribute(stream_id, attrs): def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id] handle = _GetStdHandle(stream_id)
return _SetConsoleTextAttribute(handle, attrs) return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position, adjust=True): def SetConsoleCursorPosition(stream_id, position, adjust=True):
@ -127,11 +125,11 @@ else:
adjusted_position.Y += sr.Top adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left adjusted_position.X += sr.Left
# Resume normal processing # Resume normal processing
handle = handles[stream_id] handle = _GetStdHandle(stream_id)
return _SetConsoleCursorPosition(handle, adjusted_position) return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start): def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id] handle = _GetStdHandle(stream_id)
char = c_char(char.encode()) char = c_char(char.encode())
length = wintypes.DWORD(length) length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0) num_written = wintypes.DWORD(0)
@ -142,7 +140,7 @@ else:
def FillConsoleOutputAttribute(stream_id, attr, length, start): def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )''' ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = handles[stream_id] handle = _GetStdHandle(stream_id)
attribute = wintypes.WORD(attr) attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length) length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0) num_written = wintypes.DWORD(0)

View file

@ -44,6 +44,7 @@ class WinTerm(object):
def reset_all(self, on_stderr=None): def reset_all(self, on_stderr=None):
self.set_attrs(self._default) self.set_attrs(self._default)
self.set_console(attrs=self._default) self.set_console(attrs=self._default)
self._light = 0
def fore(self, fore=None, light=False, on_stderr=False): def fore(self, fore=None, light=False, on_stderr=False):
if fore is None: if fore is None:
@ -122,12 +123,15 @@ class WinTerm(object):
if mode == 0: if mode == 0:
from_coord = csbi.dwCursorPosition from_coord = csbi.dwCursorPosition
cells_to_erase = cells_in_screen - cells_before_cursor cells_to_erase = cells_in_screen - cells_before_cursor
if mode == 1: elif mode == 1:
from_coord = win32.COORD(0, 0) from_coord = win32.COORD(0, 0)
cells_to_erase = cells_before_cursor cells_to_erase = cells_before_cursor
elif mode == 2: elif mode == 2:
from_coord = win32.COORD(0, 0) from_coord = win32.COORD(0, 0)
cells_to_erase = cells_in_screen cells_to_erase = cells_in_screen
else:
# invalid mode
return
# fill the entire screen with blanks # fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly # now set the buffer's attributes accordingly
@ -147,12 +151,15 @@ class WinTerm(object):
if mode == 0: if mode == 0:
from_coord = csbi.dwCursorPosition from_coord = csbi.dwCursorPosition
cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
if mode == 1: elif mode == 1:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwCursorPosition.X cells_to_erase = csbi.dwCursorPosition.X
elif mode == 2: elif mode == 2:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y) from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwSize.X cells_to_erase = csbi.dwSize.X
else:
# invalid mode
return
# fill the entire screen with blanks # fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord) win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly # now set the buffer's attributes accordingly

View file

@ -1,4 +1,6 @@
try: try:
from .cjellyfish import * # noqa from .cjellyfish import * # noqa
library = "C"
except ImportError: except ImportError:
from ._jellyfish import * # noqa from ._jellyfish import * # noqa
library = "Python"

View file

@ -1,6 +1,6 @@
import unicodedata import unicodedata
from collections import defaultdict from collections import defaultdict
from .compat import _range, _zip_longest, _no_bytes_err from .compat import _range, _zip_longest, IS_PY3
from .porter import Stemmer from .porter import Stemmer
@ -8,9 +8,16 @@ def _normalize(s):
return unicodedata.normalize('NFKD', s) return unicodedata.normalize('NFKD', s)
def _check_type(s):
if IS_PY3 and not isinstance(s, str):
raise TypeError('expected str or unicode, got %s' % type(s).__name__)
elif not IS_PY3 and not isinstance(s, unicode):
raise TypeError('expected unicode, got %s' % type(s).__name__)
def levenshtein_distance(s1, s2): def levenshtein_distance(s1, s2):
if isinstance(s1, bytes) or isinstance(s2, bytes): _check_type(s1)
raise TypeError(_no_bytes_err) _check_type(s2)
if s1 == s2: if s1 == s2:
return 0 return 0
@ -36,14 +43,14 @@ def levenshtein_distance(s1, s2):
def _jaro_winkler(ying, yang, long_tolerance, winklerize): def _jaro_winkler(ying, yang, long_tolerance, winklerize):
if isinstance(ying, bytes) or isinstance(yang, bytes): _check_type(ying)
raise TypeError(_no_bytes_err) _check_type(yang)
ying_len = len(ying) ying_len = len(ying)
yang_len = len(yang) yang_len = len(yang)
if not ying_len or not yang_len: if not ying_len or not yang_len:
return 0 return 0.0
min_len = max(ying_len, yang_len) min_len = max(ying_len, yang_len)
search_range = (min_len // 2) - 1 search_range = (min_len // 2) - 1
@ -66,7 +73,7 @@ def _jaro_winkler(ying, yang, long_tolerance, winklerize):
# short circuit if no characters match # short circuit if no characters match
if not common_chars: if not common_chars:
return 0 return 0.0
# count transpositions # count transpositions
k = trans_count = 0 k = trans_count = 0
@ -106,8 +113,8 @@ def _jaro_winkler(ying, yang, long_tolerance, winklerize):
def damerau_levenshtein_distance(s1, s2): def damerau_levenshtein_distance(s1, s2):
if isinstance(s1, bytes) or isinstance(s2, bytes): _check_type(s1)
raise TypeError(_no_bytes_err) _check_type(s2)
len1 = len(s1) len1 = len(s1)
len2 = len(s2) len2 = len(s2)
@ -155,25 +162,27 @@ def jaro_winkler(s1, s2, long_tolerance=False):
def soundex(s): def soundex(s):
_check_type(s)
if not s: if not s:
return s return ''
if isinstance(s, bytes):
raise TypeError(_no_bytes_err)
s = _normalize(s) s = _normalize(s)
s = s.upper()
replacements = (('bfpv', '1'), replacements = (('BFPV', '1'),
('cgjkqsxz', '2'), ('CGJKQSXZ', '2'),
('dt', '3'), ('DT', '3'),
('l', '4'), ('L', '4'),
('mn', '5'), ('MN', '5'),
('r', '6')) ('R', '6'))
result = [s[0]] result = [s[0]]
count = 1 count = 1
# find would-be replacment for first character # find would-be replacment for first character
for lset, sub in replacements: for lset, sub in replacements:
if s[0].lower() in lset: if s[0] in lset:
last = sub last = sub
break break
else: else:
@ -181,7 +190,7 @@ def soundex(s):
for letter in s[1:]: for letter in s[1:]:
for lset, sub in replacements: for lset, sub in replacements:
if letter.lower() in lset: if letter in lset:
if sub != last: if sub != last:
result.append(sub) result.append(sub)
count += 1 count += 1
@ -197,8 +206,8 @@ def soundex(s):
def hamming_distance(s1, s2): def hamming_distance(s1, s2):
if isinstance(s1, bytes) or isinstance(s2, bytes): _check_type(s1)
raise TypeError(_no_bytes_err) _check_type(s2)
# ensure length of s1 >= s2 # ensure length of s1 >= s2
if len(s2) > len(s1): if len(s2) > len(s1):
@ -214,8 +223,9 @@ def hamming_distance(s1, s2):
def nysiis(s): def nysiis(s):
if isinstance(s, bytes):
raise TypeError(_no_bytes_err) _check_type(s)
if not s: if not s:
return '' return ''
@ -303,8 +313,8 @@ def nysiis(s):
def match_rating_codex(s): def match_rating_codex(s):
if isinstance(s, bytes): _check_type(s)
raise TypeError(_no_bytes_err)
s = s.upper() s = s.upper()
codex = [] codex = []
@ -368,8 +378,7 @@ def match_rating_comparison(s1, s2):
def metaphone(s): def metaphone(s):
if isinstance(s, bytes): _check_type(s)
raise TypeError(_no_bytes_err)
result = [] result = []
@ -457,8 +466,9 @@ def metaphone(s):
elif c == 'w': elif c == 'w':
if i == 0 and next == 'h': if i == 0 and next == 'h':
i += 1 i += 1
next = s[i+1] if nextnext in 'aeiou' or nextnext == '*****':
if next in 'aeiou': result.append('w')
elif next in 'aeiou' or next == '*****':
result.append('w') result.append('w')
elif c == 'x': elif c == 'x':
if i == 0: if i == 0:
@ -484,6 +494,6 @@ def metaphone(s):
def porter_stem(s): def porter_stem(s):
if isinstance(s, bytes): _check_type(s)
raise TypeError(_no_bytes_err)
return Stemmer(s).stem() return Stemmer(s).stem()

Some files were not shown because too many files have changed in this diff Show more