Update beets to 1.4.7

Also updates:
- colorama-0.4.1
- jellyfish-0.6.1
- munkres-1.0.12
- musicbrainzngs-0.6
- mutagen-1.41.1
- pyyaml-3.13
- six-1.12.0
- unidecode-1.0.23
This commit is contained in:
Labrys of Knossos 2018-12-15 00:52:11 -05:00
parent 05b0fb498f
commit e854005ae1
193 changed files with 15896 additions and 6384 deletions

BIN
libs/_yaml.cp37-win32.pyd Normal file

Binary file not shown.

View file

@ -19,7 +19,7 @@ import os
from beets.util import confit
__version__ = u'1.3.18'
__version__ = u'1.4.7'
__author__ = u'Adrian Sampson <adrian@radbox.org>'

26
libs/beets/__main__.py Normal file
View file

@ -0,0 +1,26 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2017, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""The __main__ module lets you run the beets CLI interface by typing
`python -m beets`.
"""
from __future__ import division, absolute_import, print_function
import sys
from .ui import main
if __name__ == "__main__":
main(sys.argv[1:])

View file

@ -22,10 +22,9 @@ from __future__ import division, absolute_import, print_function
import subprocess
import platform
from tempfile import NamedTemporaryFile
import imghdr
import os
from beets.util import displayable_path, syspath
from beets.util import displayable_path, syspath, bytestring_path
from beets.util.artresizer import ArtResizer
from beets import mediafile
@ -124,26 +123,49 @@ def check_art_similarity(log, item, imagepath, compare_threshold):
is_windows = platform.system() == "Windows"
# Converting images to grayscale tends to minimize the weight
# of colors in the diff score.
# of colors in the diff score. So we first convert both images
# to grayscale and then pipe them into the `compare` command.
# On Windows, ImageMagick doesn't support the magic \\?\ prefix
# on paths, so we pass `prefix=False` to `syspath`.
convert_cmd = ['convert', syspath(imagepath, prefix=False),
syspath(art, prefix=False),
'-colorspace', 'gray', 'MIFF:-']
compare_cmd = ['compare', '-metric', 'PHASH', '-', 'null:']
log.debug(u'comparing images with pipeline {} | {}',
convert_cmd, compare_cmd)
convert_proc = subprocess.Popen(
[b'convert', syspath(imagepath), syspath(art),
b'-colorspace', b'gray', b'MIFF:-'],
convert_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=not is_windows,
)
compare_proc = subprocess.Popen(
[b'compare', b'-metric', b'PHASH', b'-', b'null:'],
compare_cmd,
stdin=convert_proc.stdout,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=not is_windows,
)
convert_proc.stdout.close()
# Check the convert output. We're not interested in the
# standard output; that gets piped to the next stage.
convert_proc.stdout.close()
convert_stderr = convert_proc.stderr.read()
convert_proc.stderr.close()
convert_proc.wait()
if convert_proc.returncode:
log.debug(
u'ImageMagick convert failed with status {}: {!r}',
convert_proc.returncode,
convert_stderr,
)
return
# Check the compare output.
stdout, stderr = compare_proc.communicate()
if compare_proc.returncode:
if compare_proc.returncode != 1:
log.debug(u'IM phashes compare failed for {0}, {1}',
log.debug(u'ImageMagick compare failed: {0}, {1}',
displayable_path(imagepath),
displayable_path(art))
return
@ -157,7 +179,7 @@ def check_art_similarity(log, item, imagepath, compare_threshold):
log.debug(u'IM output is not a number: {0!r}', out_str)
return
log.debug(u'compare PHASH score is {0}', phash_diff)
log.debug(u'ImageMagick compare score: {0}', phash_diff)
return phash_diff <= compare_threshold
return True
@ -165,18 +187,18 @@ def check_art_similarity(log, item, imagepath, compare_threshold):
def extract(log, outpath, item):
art = get_art(log, item)
outpath = bytestring_path(outpath)
if not art:
log.info(u'No album art present in {0}, skipping.', item)
return
# Add an extension to the filename.
ext = imghdr.what(None, h=art)
ext = mediafile.image_extension(art)
if not ext:
log.warning(u'Unknown image type in {0}.',
displayable_path(item.path))
return
outpath += b'.' + ext
outpath += bytestring_path('.' + ext)
log.info(u'Extracting album art from: {0} to: {1}',
item, displayable_path(outpath))

View file

@ -23,7 +23,7 @@ from beets import config
# Parts of external interface.
from .hooks import AlbumInfo, TrackInfo, AlbumMatch, TrackMatch # noqa
from .match import tag_item, tag_album # noqa
from .match import tag_item, tag_album, Proposal # noqa
from .match import Recommendation # noqa
# Global logger.
@ -40,10 +40,21 @@ def apply_item_metadata(item, track_info):
item.artist_credit = track_info.artist_credit
item.title = track_info.title
item.mb_trackid = track_info.track_id
item.mb_releasetrackid = track_info.release_track_id
if track_info.artist_id:
item.mb_artistid = track_info.artist_id
if track_info.data_source:
item.data_source = track_info.data_source
if track_info.lyricist is not None:
item.lyricist = track_info.lyricist
if track_info.composer is not None:
item.composer = track_info.composer
if track_info.composer_sort is not None:
item.composer_sort = track_info.composer_sort
if track_info.arranger is not None:
item.arranger = track_info.arranger
# At the moment, the other metadata is left intact (including album
# and track number). Perhaps these should be emptied?
@ -52,13 +63,20 @@ def apply_metadata(album_info, mapping):
"""Set the items' metadata to match an AlbumInfo object using a
mapping from Items to TrackInfo objects.
"""
for item, track_info in mapping.iteritems():
# Album, artist, track count.
if track_info.artist:
item.artist = track_info.artist
for item, track_info in mapping.items():
# Artist or artist credit.
if config['artist_credit']:
item.artist = (track_info.artist_credit or
track_info.artist or
album_info.artist_credit or
album_info.artist)
item.albumartist = (album_info.artist_credit or
album_info.artist)
else:
item.artist = album_info.artist
item.artist = (track_info.artist or album_info.artist)
item.albumartist = album_info.artist
# Album.
item.album = album_info.album
# Artist sort and credit names.
@ -97,8 +115,9 @@ def apply_metadata(album_info, mapping):
if config['per_disc_numbering']:
# We want to let the track number be zero, but if the medium index
# is not provided we need to fall back to the overall index.
if track_info.medium_index is not None:
item.track = track_info.medium_index
if item.track is None:
else:
item.track = track_info.index
item.tracktotal = track_info.medium_total or len(album_info.tracks)
else:
@ -111,6 +130,7 @@ def apply_metadata(album_info, mapping):
# MusicBrainz IDs.
item.mb_trackid = track_info.track_id
item.mb_releasetrackid = track_info.release_track_id
item.mb_albumid = album_info.album_id
if track_info.artist_id:
item.mb_artistid = track_info.artist_id
@ -141,3 +161,14 @@ def apply_metadata(album_info, mapping):
if track_info.media is not None:
item.media = track_info.media
if track_info.lyricist is not None:
item.lyricist = track_info.lyricist
if track_info.composer is not None:
item.composer = track_info.composer
if track_info.composer_sort is not None:
item.composer_sort = track_info.composer_sort
if track_info.arranger is not None:
item.arranger = track_info.arranger
item.track_alt = track_info.track_alt

View file

@ -17,14 +17,17 @@
from __future__ import division, absolute_import, print_function
from collections import namedtuple
from functools import total_ordering
import re
from beets import logging
from beets import plugins
from beets import config
from beets.util import as_string
from beets.autotag import mb
from jellyfish import levenshtein_distance
from unidecode import unidecode
import six
log = logging.getLogger('beets')
@ -104,7 +107,7 @@ class AlbumInfo(object):
# Work around a bug in python-musicbrainz-ngs that causes some
# strings to be bytes rather than Unicode.
# https://github.com/alastair/python-musicbrainz-ngs/issues/85
def decode(self, codec='utf8'):
def decode(self, codec='utf-8'):
"""Ensure that all string attributes on this object, and the
constituent `TrackInfo` objects, are decoded to Unicode.
"""
@ -126,6 +129,8 @@ class TrackInfo(object):
- ``title``: name of the track
- ``track_id``: MusicBrainz ID; UUID fragment only
- ``release_track_id``: MusicBrainz ID respective to a track on a
particular release; UUID fragment only
- ``artist``: individual track artist name
- ``artist_id``
- ``length``: float: duration of the track in seconds
@ -139,18 +144,25 @@ class TrackInfo(object):
- ``artist_credit``: Recording-specific artist name
- ``data_source``: The original data source (MusicBrainz, Discogs, etc.)
- ``data_url``: The data source release URL.
- ``lyricist``: individual track lyricist name
- ``composer``: individual track composer name
- ``composer_sort``: individual track composer sort name
- ``arranger``: individual track arranger name
- ``track_alt``: alternative track number (tape, vinyl, etc.)
Only ``title`` and ``track_id`` are required. The rest of the fields
may be None. The indices ``index``, ``medium``, and ``medium_index``
are all 1-based.
"""
def __init__(self, title, track_id, artist=None, artist_id=None,
length=None, index=None, medium=None, medium_index=None,
medium_total=None, artist_sort=None, disctitle=None,
artist_credit=None, data_source=None, data_url=None,
media=None):
def __init__(self, title, track_id, release_track_id=None, artist=None,
artist_id=None, length=None, index=None, medium=None,
medium_index=None, medium_total=None, artist_sort=None,
disctitle=None, artist_credit=None, data_source=None,
data_url=None, media=None, lyricist=None, composer=None,
composer_sort=None, arranger=None, track_alt=None):
self.title = title
self.track_id = track_id
self.release_track_id = release_track_id
self.artist = artist
self.artist_id = artist_id
self.length = length
@ -164,9 +176,14 @@ class TrackInfo(object):
self.artist_credit = artist_credit
self.data_source = data_source
self.data_url = data_url
self.lyricist = lyricist
self.composer = composer
self.composer_sort = composer_sort
self.arranger = arranger
self.track_alt = track_alt
# As above, work around a bug in python-musicbrainz-ngs.
def decode(self, codec='utf8'):
def decode(self, codec='utf-8'):
"""Ensure that all string attributes on this object are decoded
to Unicode.
"""
@ -203,10 +220,10 @@ def _string_dist_basic(str1, str2):
transliteration/lowering to ASCII characters. Normalized by string
length.
"""
assert isinstance(str1, unicode)
assert isinstance(str2, unicode)
str1 = unidecode(str1).decode('ascii')
str2 = unidecode(str2).decode('ascii')
assert isinstance(str1, six.text_type)
assert isinstance(str2, six.text_type)
str1 = as_string(unidecode(str1))
str2 = as_string(unidecode(str2))
str1 = re.sub(r'[^a-z0-9]', '', str1.lower())
str2 = re.sub(r'[^a-z0-9]', '', str2.lower())
if not str1 and not str2:
@ -288,6 +305,8 @@ class LazyClassProperty(object):
return self.value
@total_ordering
@six.python_2_unicode_compatible
class Distance(object):
"""Keeps track of multiple distance penalties. Provides a single
weighted distance for all penalties as well as a weighted distance
@ -323,7 +342,7 @@ class Distance(object):
"""Return the maximum distance penalty (normalization factor).
"""
dist_max = 0.0
for key, penalty in self._penalties.iteritems():
for key, penalty in self._penalties.items():
dist_max += len(penalty) * self._weights[key]
return dist_max
@ -332,7 +351,7 @@ class Distance(object):
"""Return the raw (denormalized) distance.
"""
dist_raw = 0.0
for key, penalty in self._penalties.iteritems():
for key, penalty in self._penalties.items():
dist_raw += sum(penalty) * self._weights[key]
return dist_raw
@ -354,10 +373,16 @@ class Distance(object):
key=lambda key_and_dist: (-key_and_dist[1], key_and_dist[0])
)
def __hash__(self):
return id(self)
def __eq__(self, other):
return self.distance == other
# Behave like a float.
def __cmp__(self, other):
return cmp(self.distance, other)
def __lt__(self, other):
return self.distance < other
def __float__(self):
return self.distance
@ -368,7 +393,7 @@ class Distance(object):
def __rsub__(self, other):
return other - self.distance
def __unicode__(self):
def __str__(self):
return "{0:.2f}".format(self.distance)
# Behave like a dict.
@ -398,7 +423,7 @@ class Distance(object):
raise ValueError(
u'`dist` must be a Distance object, not {0}'.format(type(dist))
)
for key, penalties in dist._penalties.iteritems():
for key, penalties in dist._penalties.items():
self._penalties.setdefault(key, []).extend(penalties)
# Adding components.
@ -537,24 +562,27 @@ def track_for_mbid(recording_id):
def albums_for_id(album_id):
"""Get a list of albums for an ID."""
candidates = [album_for_mbid(album_id)]
plugin_albums = plugins.album_for_id(album_id)
for a in plugin_albums:
a = album_for_mbid(album_id)
if a:
yield a
for a in plugins.album_for_id(album_id):
if a:
plugins.send(u'albuminfo_received', info=a)
candidates.extend(plugin_albums)
return filter(None, candidates)
yield a
def tracks_for_id(track_id):
"""Get a list of tracks for an ID."""
candidates = [track_for_mbid(track_id)]
plugin_tracks = plugins.track_for_id(track_id)
for t in plugin_tracks:
t = track_for_mbid(track_id)
if t:
yield t
for t in plugins.track_for_id(track_id):
if t:
plugins.send(u'trackinfo_received', info=t)
candidates.extend(plugin_tracks)
return filter(None, candidates)
yield t
@plugins.notify_info_yielded(u'albuminfo_received')
def album_candidates(items, artist, album, va_likely):
"""Search for album matches. ``items`` is a list of Item objects
that make up the album. ``artist`` and ``album`` are the respective
@ -562,51 +590,42 @@ def album_candidates(items, artist, album, va_likely):
entered by the user. ``va_likely`` is a boolean indicating whether
the album is likely to be a "various artists" release.
"""
out = []
# Base candidates if we have album and artist to match.
if artist and album:
try:
out.extend(mb.match_album(artist, album, len(items)))
for candidate in mb.match_album(artist, album, len(items)):
yield candidate
except mb.MusicBrainzAPIError as exc:
exc.log(log)
# Also add VA matches from MusicBrainz where appropriate.
if va_likely and album:
try:
out.extend(mb.match_album(None, album, len(items)))
for candidate in mb.match_album(None, album, len(items)):
yield candidate
except mb.MusicBrainzAPIError as exc:
exc.log(log)
# Candidates from plugins.
out.extend(plugins.candidates(items, artist, album, va_likely))
# Notify subscribed plugins about fetched album info
for a in out:
plugins.send(u'albuminfo_received', info=a)
return out
for candidate in plugins.candidates(items, artist, album, va_likely):
yield candidate
@plugins.notify_info_yielded(u'trackinfo_received')
def item_candidates(item, artist, title):
"""Search for item matches. ``item`` is the Item to be matched.
``artist`` and ``title`` are strings and either reflect the item or
are specified by the user.
"""
out = []
# MusicBrainz candidates.
if artist and title:
try:
out.extend(mb.match_track(artist, title))
for candidate in mb.match_track(artist, title):
yield candidate
except mb.MusicBrainzAPIError as exc:
exc.log(log)
# Plugin candidates.
out.extend(plugins.item_candidates(item, artist, title))
# Notify subscribed plugins about fetched track info
for i in out:
plugins.send(u'trackinfo_received', info=i)
return out
for candidate in plugins.item_candidates(item, artist, title):
yield candidate

View file

@ -22,6 +22,7 @@ from __future__ import division, absolute_import, print_function
import datetime
import re
from munkres import Munkres
from collections import namedtuple
from beets import logging
from beets import plugins
@ -29,7 +30,6 @@ from beets import config
from beets.util import plurality
from beets.autotag import hooks
from beets.util.enumeration import OrderedEnum
from functools import reduce
# Artist signals that indicate "various artists". These are used at the
# album level to determine whether a given release is likely a VA
@ -53,6 +53,13 @@ class Recommendation(OrderedEnum):
strong = 3
# A structure for holding a set of possible matches to choose between. This
# consists of a list of possible candidates (i.e., AlbumInfo or TrackInfo
# objects) and a recommendation value.
Proposal = namedtuple('Proposal', ('candidates', 'recommendation'))
# Primary matching functionality.
def current_metadata(items):
@ -96,7 +103,9 @@ def assign_items(items, tracks):
costs.append(row)
# Find a minimum-cost bipartite matching.
log.debug('Computing track assignment...')
matching = Munkres().compute(costs)
log.debug('...done.')
# Produce the output matching.
mapping = dict((items[i], tracks[j]) for (i, j) in matching)
@ -238,7 +247,7 @@ def distance(items, album_info, mapping):
# Tracks.
dist.tracks = {}
for item, track in mapping.iteritems():
for item, track in mapping.items():
dist.tracks[track] = track_distance(item, track, album_info.va)
dist.add('tracks', dist.tracks[track].distance)
@ -261,19 +270,23 @@ def match_by_id(items):
AlbumInfo object for the corresponding album. Otherwise, returns
None.
"""
# Is there a consensus on the MB album ID?
albumids = [item.mb_albumid for item in items if item.mb_albumid]
if not albumids:
log.debug(u'No album IDs found.')
albumids = (item.mb_albumid for item in items if item.mb_albumid)
# Did any of the items have an MB album ID?
try:
first = next(albumids)
except StopIteration:
log.debug(u'No album ID found.')
return None
# If all album IDs are equal, look up the album.
if bool(reduce(lambda x, y: x if x == y else (), albumids)):
albumid = albumids[0]
log.debug(u'Searching for discovered album ID: {0}', albumid)
return hooks.album_for_mbid(albumid)
else:
# Is there a consensus on the MB album ID?
for other in albumids:
if other != first:
log.debug(u'No album ID consensus.')
return None
# If all album IDs are equal, look up the album.
log.debug(u'Searching for discovered album ID: {0}', first)
return hooks.album_for_mbid(first)
def _recommendation(results):
@ -312,10 +325,10 @@ def _recommendation(results):
keys = set(min_dist.keys())
if isinstance(results[0], hooks.AlbumMatch):
for track_dist in min_dist.tracks.values():
keys.update(track_dist.keys())
keys.update(list(track_dist.keys()))
max_rec_view = config['match']['max_rec']
for key in keys:
if key in max_rec_view.keys():
if key in list(max_rec_view.keys()):
max_rec = max_rec_view[key].as_choice({
'strong': Recommendation.strong,
'medium': Recommendation.medium,
@ -327,13 +340,19 @@ def _recommendation(results):
return rec
def _sort_candidates(candidates):
"""Sort candidates by distance."""
return sorted(candidates, key=lambda match: match.distance)
def _add_candidate(items, results, info):
"""Given a candidate AlbumInfo object, attempt to add the candidate
to the output dictionary of AlbumMatch objects. This involves
checking the track count, ordering the items, checking for
duplicates, and calculating the distance.
"""
log.debug(u'Candidate: {0} - {1}', info.artist, info.album)
log.debug(u'Candidate: {0} - {1} ({2})',
info.artist, info.album, info.album_id)
# Discard albums with zero tracks.
if not info.tracks:
@ -371,9 +390,8 @@ def _add_candidate(items, results, info):
def tag_album(items, search_artist=None, search_album=None,
search_ids=[]):
"""Return a tuple of a artist name, an album name, a list of
`AlbumMatch` candidates from the metadata backend, and a
`Recommendation`.
"""Return a tuple of the current artist name, the current album
name, and a `Proposal` containing `AlbumMatch` candidates.
The artist and album are the most common values of these fields
among `items`.
@ -401,10 +419,10 @@ def tag_album(items, search_artist=None, search_album=None,
# Search by explicit ID.
if search_ids:
search_cands = []
for search_id in search_ids:
log.debug(u'Searching for album ID: {0}', search_id)
search_cands.extend(hooks.albums_for_id(search_id))
for id_candidate in hooks.albums_for_id(search_id):
_add_candidate(items, candidates, id_candidate)
# Use existing metadata or text search.
else:
@ -412,7 +430,7 @@ def tag_album(items, search_artist=None, search_album=None,
id_info = match_by_id(items)
if id_info:
_add_candidate(items, candidates, id_info)
rec = _recommendation(candidates.values())
rec = _recommendation(list(candidates.values()))
log.debug(u'Album ID match recommendation is {0}', rec)
if candidates and not config['import']['timid']:
# If we have a very good MBID match, return immediately.
@ -420,7 +438,8 @@ def tag_album(items, search_artist=None, search_album=None,
# matches.
if rec == Recommendation.strong:
log.debug(u'ID match.')
return cur_artist, cur_album, candidates.values(), rec
return cur_artist, cur_album, \
Proposal(list(candidates.values()), rec)
# Search terms.
if not (search_artist and search_album):
@ -435,24 +454,25 @@ def tag_album(items, search_artist=None, search_album=None,
log.debug(u'Album might be VA: {0}', va_likely)
# Get the results from the data sources.
search_cands = hooks.album_candidates(items, search_artist,
search_album, va_likely)
log.debug(u'Evaluating {0} candidates.', len(search_cands))
for info in search_cands:
_add_candidate(items, candidates, info)
for matched_candidate in hooks.album_candidates(items,
search_artist,
search_album,
va_likely):
_add_candidate(items, candidates, matched_candidate)
log.debug(u'Evaluating {0} candidates.', len(candidates))
# Sort and get the recommendation.
candidates = sorted(candidates.itervalues())
candidates = _sort_candidates(candidates.values())
rec = _recommendation(candidates)
return cur_artist, cur_album, candidates, rec
return cur_artist, cur_album, Proposal(candidates, rec)
def tag_item(item, search_artist=None, search_title=None,
search_ids=[]):
"""Attempts to find metadata for a single track. Returns a
`(candidates, recommendation)` pair where `candidates` is a list of
TrackMatch objects. `search_artist` and `search_title` may be used
"""Find metadata for a single track. Return a `Proposal` consisting
of `TrackMatch` objects.
`search_artist` and `search_title` may be used
to override the current metadata for the purposes of the MusicBrainz
title. `search_ids` may be used for restricting the search to a list
of metadata backend IDs.
@ -462,7 +482,7 @@ def tag_item(item, search_artist=None, search_title=None,
candidates = {}
# First, try matching by MusicBrainz ID.
trackids = search_ids or filter(None, [item.mb_trackid])
trackids = search_ids or [t for t in [item.mb_trackid] if t]
if trackids:
for trackid in trackids:
log.debug(u'Searching for track ID: {0}', trackid)
@ -471,18 +491,18 @@ def tag_item(item, search_artist=None, search_title=None,
candidates[track_info.track_id] = \
hooks.TrackMatch(dist, track_info)
# If this is a good match, then don't keep searching.
rec = _recommendation(sorted(candidates.itervalues()))
rec = _recommendation(_sort_candidates(candidates.values()))
if rec == Recommendation.strong and \
not config['import']['timid']:
log.debug(u'Track ID match.')
return sorted(candidates.itervalues()), rec
return Proposal(_sort_candidates(candidates.values()), rec)
# If we're searching by ID, don't proceed.
if search_ids:
if candidates:
return sorted(candidates.itervalues()), rec
return Proposal(_sort_candidates(candidates.values()), rec)
else:
return [], Recommendation.none
return Proposal([], Recommendation.none)
# Search terms.
if not (search_artist and search_title):
@ -496,6 +516,6 @@ def tag_item(item, search_artist=None, search_title=None,
# Sort by distance and return with recommendation.
log.debug(u'Found {0} candidates.', len(candidates))
candidates = sorted(candidates.itervalues())
candidates = _sort_candidates(candidates.values())
rec = _recommendation(candidates)
return candidates, rec
return Proposal(candidates, rec)

View file

@ -20,17 +20,24 @@ from __future__ import division, absolute_import, print_function
import musicbrainzngs
import re
import traceback
from urlparse import urljoin
from six.moves.urllib.parse import urljoin
from beets import logging
import beets.autotag.hooks
import beets
from beets import util
from beets import config
import six
VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377'
if util.SNI_SUPPORTED:
BASE_URL = 'https://musicbrainz.org/'
else:
BASE_URL = 'http://musicbrainz.org/'
SKIPPED_TRACKS = ['[data track]']
musicbrainzngs.set_useragent('beets', beets.__version__,
'http://beets.io/')
@ -53,8 +60,12 @@ class MusicBrainzAPIError(util.HumanReadableException):
log = logging.getLogger('beets')
RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups',
'labels', 'artist-credits', 'aliases']
'labels', 'artist-credits', 'aliases',
'recording-level-rels', 'work-rels',
'work-level-rels', 'artist-rels']
TRACK_INCLUDES = ['artists', 'aliases']
if 'work-level-rels' in musicbrainzngs.VALID_INCLUDES['recording']:
TRACK_INCLUDES += ['work-level-rels', 'artist-rels']
def track_url(trackid):
@ -69,7 +80,8 @@ def configure():
"""Set up the python-musicbrainz-ngs module according to settings
from the beets configuration. This should be called at startup.
"""
musicbrainzngs.set_hostname(config['musicbrainz']['host'].get(unicode))
hostname = config['musicbrainz']['host'].as_str()
musicbrainzngs.set_hostname(hostname)
musicbrainzngs.set_rate_limit(
config['musicbrainz']['ratelimit_interval'].as_number(),
config['musicbrainz']['ratelimit'].get(int),
@ -99,6 +111,24 @@ def _preferred_alias(aliases):
return matches[0]
def _preferred_release_event(release):
    """Select the user's preferred release event for ``release``.

    Return a ``(country, date)`` tuple for the first release event whose
    area matches one of the countries configured under
    ``match.preferred.countries``. When no preferred event is found, fall
    back to the release's own top-level country and date.
    """
    preferred_countries = \
        config['match']['preferred']['countries'].as_str_seq()
    for wanted in preferred_countries:
        for event in release.get('release-event-list', {}):
            # An event may lack an 'area', its ISO code list, or a
            # 'date'; any of those missing keys just skips the event.
            try:
                iso_codes = event['area']['iso-3166-1-code-list']
                if wanted in iso_codes:
                    return wanted, event['date']
            except KeyError:
                pass
    return release.get('country'), release.get('date')
def _flatten_artist_credit(credit):
"""Given a list representing an ``artist-credit`` block, flatten the
data into a triple of joined artist name strings: canonical, sort, and
@ -108,7 +138,7 @@ def _flatten_artist_credit(credit):
artist_sort_parts = []
artist_credit_parts = []
for el in credit:
if isinstance(el, basestring):
if isinstance(el, six.string_types):
# Join phrase.
artist_parts.append(el)
artist_credit_parts.append(el)
@ -177,6 +207,37 @@ def track_info(recording, index=None, medium=None, medium_index=None,
if recording.get('length'):
info.length = int(recording['length']) / (1000.0)
lyricist = []
composer = []
composer_sort = []
for work_relation in recording.get('work-relation-list', ()):
if work_relation['type'] != 'performance':
continue
for artist_relation in work_relation['work'].get(
'artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'lyricist':
lyricist.append(artist_relation['artist']['name'])
elif type == 'composer':
composer.append(artist_relation['artist']['name'])
composer_sort.append(
artist_relation['artist']['sort-name'])
if lyricist:
info.lyricist = u', '.join(lyricist)
if composer:
info.composer = u', '.join(composer)
info.composer_sort = u', '.join(composer_sort)
arranger = []
for artist_relation in recording.get('artist-relation-list', ()):
if 'type' in artist_relation:
type = artist_relation['type']
if type == 'arranger':
arranger.append(artist_relation['artist']['name'])
if arranger:
info.arranger = u', '.join(arranger)
info.decode()
return info
@ -216,11 +277,28 @@ def album_info(release):
disctitle = medium.get('title')
format = medium.get('format')
if format in config['match']['ignored_media'].as_str_seq():
continue
all_tracks = medium['track-list']
if 'data-track-list' in medium:
all_tracks += medium['data-track-list']
track_count = len(all_tracks)
if 'pregap' in medium:
all_tracks.insert(0, medium['pregap'])
for track in all_tracks:
if ('title' in track['recording'] and
track['recording']['title'] in SKIPPED_TRACKS):
continue
if ('video' in track['recording'] and
track['recording']['video'] == 'true' and
config['match']['ignore_video_tracks']):
continue
# Basic information from the recording.
index += 1
ti = track_info(
@ -228,10 +306,12 @@ def album_info(release):
index,
int(medium['position']),
int(track['position']),
len(medium['track-list']),
track_count,
)
ti.release_track_id = track['id']
ti.disctitle = disctitle
ti.media = format
ti.track_alt = track['number']
# Prefer track data, where present, over recording data.
if track.get('title'):
@ -260,10 +340,9 @@ def album_info(release):
)
info.va = info.artist_id == VARIOUS_ARTISTS_ID
if info.va:
info.artist = config['va_name'].get(unicode)
info.artist = config['va_name'].as_str()
info.asin = release.get('asin')
info.releasegroup_id = release['release-group']['id']
info.country = release.get('country')
info.albumstatus = release.get('status')
# Build up the disambiguation string from the release group and release.
@ -274,14 +353,28 @@ def album_info(release):
disambig.append(release.get('disambiguation'))
info.albumdisambig = u', '.join(disambig)
# Release type not always populated.
# Get the "classic" Release type. This data comes from a legacy API
# feature before MusicBrainz supported multiple release types.
if 'type' in release['release-group']:
reltype = release['release-group']['type']
if reltype:
info.albumtype = reltype.lower()
# Release dates.
release_date = release.get('date')
# Log the new-style "primary" and "secondary" release types.
# Eventually, we'd like to actually store this data, but we just log
# it for now to help understand the differences.
if 'primary-type' in release['release-group']:
rel_primarytype = release['release-group']['primary-type']
if rel_primarytype:
log.debug('primary MB release type: ' + rel_primarytype.lower())
if 'secondary-type-list' in release['release-group']:
if release['release-group']['secondary-type-list']:
log.debug('secondary MB release type(s): ' + ', '.join(
[secondarytype.lower() for secondarytype in
release['release-group']['secondary-type-list']]))
# Release events.
info.country, release_date = _preferred_release_event(release)
release_group_date = release['release-group'].get('first-release-date')
if not release_date:
# Fall back if release-specific date is not available.
@ -329,13 +422,14 @@ def match_album(artist, album, tracks=None):
# Various Artists search.
criteria['arid'] = VARIOUS_ARTISTS_ID
if tracks is not None:
criteria['tracks'] = unicode(tracks)
criteria['tracks'] = six.text_type(tracks)
# Abort if we have no search terms.
if not any(criteria.itervalues()):
if not any(criteria.values()):
return
try:
log.debug(u'Searching for MusicBrainz releases with: {!r}', criteria)
res = musicbrainzngs.search_releases(
limit=config['musicbrainz']['searchlimit'].get(int), **criteria)
except musicbrainzngs.MusicBrainzError as exc:
@ -358,7 +452,7 @@ def match_track(artist, title):
'recording': title.lower().strip(),
}
if not any(criteria.itervalues()):
if not any(criteria.values()):
return
try:
@ -376,7 +470,7 @@ def _parse_id(s):
no ID can be found, return None.
"""
# Find the first thing that looks like a UUID/MBID.
match = re.search(ur'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
match = re.search(u'[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
if match:
return match.group()
@ -386,6 +480,7 @@ def album_for_id(releaseid):
object or None if the album is not found. May raise a
MusicBrainzAPIError.
"""
log.debug(u'Requesting MusicBrainz release {}', releaseid)
albumid = _parse_id(releaseid)
if not albumid:
log.debug(u'Invalid MBID ({0}).', releaseid)

View file

@ -6,9 +6,12 @@ import:
copy: yes
move: no
link: no
hardlink: no
delete: no
resume: ask
incremental: no
incremental_skip_later: no
from_scratch: no
quiet_fallback: skip
none_rec_action: ask
timid: no
@ -23,6 +26,9 @@ import:
group_albums: no
pretend: false
search_ids: []
duplicate_action: ask
bell: no
set_fields: {}
clutter: ["Thumbs.DB", ".DS_Store"]
ignore: [".*", "*~", "System Volume Information", "lost+found"]
@ -36,6 +42,7 @@ replace:
'\.$': _
'\s+$': ''
'^\s+': ''
'^-': _
path_sep_replace: _
asciify_paths: false
art_filename: cover
@ -49,6 +56,7 @@ per_disc_numbering: no
verbose: 0
terminal_encoding:
original_date: no
artist_credit: no
id3v23: no
va_name: "Various Artists"
@ -120,5 +128,7 @@ match:
original_year: no
ignored: []
required: []
ignored_media: []
ignore_video_tracks: yes
track_length_grace: 10
track_length_max: 30

View file

@ -27,8 +27,19 @@ import collections
import beets
from beets.util.functemplate import Template
from beets.util import py3_path
from beets.dbcore import types
from .query import MatchQuery, NullSort, TrueQuery
import six
class DBAccessError(Exception):
"""The SQLite database became inaccessible.
This can happen when trying to read or write the database when, for
example, the database file is deleted or otherwise disappears. There
is probably no way to recover from this error.
"""
class FormattedMapping(collections.Mapping):
@ -66,10 +77,10 @@ class FormattedMapping(collections.Mapping):
def _get_formatted(self, model, key):
value = model._type(key).format(model.get(key))
if isinstance(value, bytes):
value = value.decode('utf8', 'ignore')
value = value.decode('utf-8', 'ignore')
if self.for_path:
sep_repl = beets.config['path_sep_replace'].get(unicode)
sep_repl = beets.config['path_sep_replace'].as_str()
for sep in (os.path.sep, os.path.altsep):
if sep:
value = value.replace(sep, sep_repl)
@ -176,9 +187,9 @@ class Model(object):
ordinary construction are bypassed.
"""
obj = cls(db)
for key, value in fixed_values.iteritems():
for key, value in fixed_values.items():
obj._values_fixed[key] = cls._type(key).from_sql(value)
for key, value in flex_values.iteritems():
for key, value in flex_values.items():
obj._values_flex[key] = cls._type(key).from_sql(value)
return obj
@ -206,6 +217,21 @@ class Model(object):
if need_id and not self.id:
raise ValueError(u'{0} has no id'.format(type(self).__name__))
def copy(self):
"""Create a copy of the model object.
The field values and other state is duplicated, but the new copy
remains associated with the same database as the old object.
(A simple `copy.deepcopy` will not work because it would try to
duplicate the SQLite connection.)
"""
new = self.__class__()
new._db = self._db
new._values_fixed = self._values_fixed.copy()
new._values_flex = self._values_flex.copy()
new._dirty = self._dirty.copy()
return new
# Essential field accessors.
@classmethod
@ -225,14 +251,15 @@ class Model(object):
if key in getters: # Computed.
return getters[key](self)
elif key in self._fields: # Fixed.
return self._values_fixed.get(key)
return self._values_fixed.get(key, self._type(key).null)
elif key in self._values_flex: # Flexible.
return self._values_flex[key]
else:
raise KeyError(key)
def __setitem__(self, key, value):
"""Assign the value for a field.
def _setitem(self, key, value):
"""Assign the value for a field, return whether new and old value
differ.
"""
# Choose where to place the value.
if key in self._fields:
@ -246,9 +273,17 @@ class Model(object):
# Assign value and possibly mark as dirty.
old_value = source.get(key)
source[key] = value
if self._always_dirty or old_value != value:
changed = old_value != value
if self._always_dirty or changed:
self._dirty.add(key)
return changed
def __setitem__(self, key, value):
"""Assign the value for a field.
"""
self._setitem(key, value)
def __delitem__(self, key):
"""Remove a flexible attribute from the model.
"""
@ -267,9 +302,9 @@ class Model(object):
`computed` parameter controls whether computed (plugin-provided)
fields are included in the key list.
"""
base_keys = list(self._fields) + self._values_flex.keys()
base_keys = list(self._fields) + list(self._values_flex.keys())
if computed:
return base_keys + self._getters().keys()
return base_keys + list(self._getters().keys())
else:
return base_keys
@ -278,7 +313,7 @@ class Model(object):
"""Get a list of available keys for objects of this type.
Includes fixed and computed fields.
"""
return list(cls._fields) + cls._getters().keys()
return list(cls._fields) + list(cls._getters().keys())
# Act like a dictionary.
@ -340,15 +375,19 @@ class Model(object):
# Database interaction (CRUD methods).
def store(self):
def store(self, fields=None):
"""Save the object's metadata into the library database.
:param fields: the fields to be stored. If not specified, all fields
will be.
"""
if fields is None:
fields = self._fields
self._check_db()
# Build assignments for query.
assignments = []
subvars = []
for key in self._fields:
for key in fields:
if key != 'id' and key in self._dirty:
self._dirty.remove(key)
assignments.append(key + '=?')
@ -452,7 +491,7 @@ class Model(object):
separators will be added to the template.
"""
# Perform substitution.
if isinstance(template, basestring):
if isinstance(template, six.string_types):
template = Template(template)
return template.substitute(self.formatted(for_path),
self._template_funcs())
@ -463,7 +502,7 @@ class Model(object):
def _parse(cls, key, string):
"""Parse a string as a value for the given key.
"""
if not isinstance(string, basestring):
if not isinstance(string, six.string_types):
raise TypeError(u"_parse() argument must be a string")
return cls._type(key).parse(string)
@ -593,6 +632,11 @@ class Results(object):
return self._row_count
def __nonzero__(self):
"""Does this result contain any objects?
"""
return self.__bool__()
def __bool__(self):
"""Does this result contain any objects?
"""
return bool(len(self))
@ -669,8 +713,18 @@ class Transaction(object):
"""Execute an SQL statement with substitution values and return
the row ID of the last affected row.
"""
try:
cursor = self.db._connection().execute(statement, subvals)
return cursor.lastrowid
except sqlite3.OperationalError as e:
# In two specific cases, SQLite reports an error while accessing
# the underlying database file. We surface these exceptions as
# DBAccessError so the application can abort.
if e.args[0] in ("attempt to write a readonly database",
"unable to open database file"):
raise DBAccessError(e.args[0])
else:
raise
def script(self, statements):
"""Execute a string containing multiple SQL statements."""
@ -685,8 +739,9 @@ class Database(object):
"""The Model subclasses representing tables in this database.
"""
def __init__(self, path):
def __init__(self, path, timeout=5.0):
self.path = path
self.timeout = timeout
self._connections = {}
self._tx_stacks = defaultdict(list)
@ -721,18 +776,36 @@ class Database(object):
if thread_id in self._connections:
return self._connections[thread_id]
else:
# Make a new connection.
conn = self._create_connection()
self._connections[thread_id] = conn
return conn
def _create_connection(self):
"""Create a SQLite connection to the underlying database.
Makes a new connection every time. If you need to configure the
connection settings (e.g., add custom functions), override this
method.
"""
# Make a new connection. The `sqlite3` module can't use
# bytestring paths here on Python 3, so we need to
# provide a `str` using `py3_path`.
conn = sqlite3.connect(
self.path,
timeout=beets.config['timeout'].as_number(),
py3_path(self.path), timeout=self.timeout
)
# Access SELECT results like dictionaries.
conn.row_factory = sqlite3.Row
self._connections[thread_id] = conn
return conn
def _close(self):
"""Close the all connections to the underlying SQLite database
from all threads. This does not render the database object
unusable; new connections can still be opened on demand.
"""
with self._shared_map_lock:
self._connections.clear()
@contextlib.contextmanager
def _tx_stack(self):
"""A context manager providing access to the current thread's

View file

@ -23,6 +23,10 @@ from beets import util
from datetime import datetime, timedelta
import unicodedata
from functools import reduce
import six
if not six.PY2:
buffer = memoryview # sqlite won't accept memoryview in python 2
class ParsingError(ValueError):
@ -36,6 +40,7 @@ class InvalidQueryError(ParsingError):
The query should be a unicode string or a list, which will be space-joined.
"""
def __init__(self, query, explanation):
if isinstance(query, list):
query = " ".join(query)
@ -43,22 +48,24 @@ class InvalidQueryError(ParsingError):
super(InvalidQueryError, self).__init__(message)
class InvalidQueryArgumentTypeError(ParsingError):
class InvalidQueryArgumentValueError(ParsingError):
"""Represent a query argument that could not be converted as expected.
It exists to be caught in upper stack levels so a meaningful (i.e. with the
query) InvalidQueryError can be raised.
"""
def __init__(self, what, expected, detail=None):
message = u"'{0}' is not {1}".format(what, expected)
if detail:
message = u"{0}: {1}".format(message, detail)
super(InvalidQueryArgumentTypeError, self).__init__(message)
super(InvalidQueryArgumentValueError, self).__init__(message)
class Query(object):
"""An abstract class representing a query into the item database.
"""
def clause(self):
"""Generate an SQLite expression implementing the query.
@ -91,6 +98,7 @@ class FieldQuery(Query):
string. Subclasses may also provide `col_clause` to implement the
same matching functionality in SQLite.
"""
def __init__(self, field, pattern, fast=True):
self.field = field
self.pattern = pattern
@ -130,6 +138,7 @@ class FieldQuery(Query):
class MatchQuery(FieldQuery):
"""A query that looks for exact matches in an item field."""
def col_clause(self):
return self.field + " = ?", [self.pattern]
@ -139,6 +148,7 @@ class MatchQuery(FieldQuery):
class NoneQuery(FieldQuery):
"""A query that checks whether a field is null."""
def __init__(self, field, fast=True):
super(NoneQuery, self).__init__(field, None, fast)
@ -161,6 +171,7 @@ class StringFieldQuery(FieldQuery):
"""A FieldQuery that converts values to strings before matching
them.
"""
@classmethod
def value_match(cls, pattern, value):
"""Determine whether the value matches the pattern. The value
@ -178,6 +189,7 @@ class StringFieldQuery(FieldQuery):
class SubstringQuery(StringFieldQuery):
"""A query that matches a substring in a specific item field."""
def col_clause(self):
pattern = (self.pattern
.replace('\\', '\\\\')
@ -200,6 +212,7 @@ class RegexpQuery(StringFieldQuery):
Raises InvalidQueryError when the pattern is not a valid regular
expression.
"""
def __init__(self, field, pattern, fast=True):
super(RegexpQuery, self).__init__(field, pattern, fast)
pattern = self._normalize(pattern)
@ -207,7 +220,7 @@ class RegexpQuery(StringFieldQuery):
self.pattern = re.compile(self.pattern)
except re.error as exc:
# Invalid regular expression.
raise InvalidQueryArgumentTypeError(pattern,
raise InvalidQueryArgumentValueError(pattern,
u"a regular expression",
format(exc))
@ -227,9 +240,10 @@ class BooleanQuery(MatchQuery):
"""Matches a boolean field. Pattern should either be a boolean or a
string reflecting a boolean.
"""
def __init__(self, field, pattern, fast=True):
super(BooleanQuery, self).__init__(field, pattern, fast)
if isinstance(pattern, basestring):
if isinstance(pattern, six.string_types):
self.pattern = util.str2bool(pattern)
self.pattern = int(self.pattern)
@ -240,17 +254,16 @@ class BytesQuery(MatchQuery):
`unicode` equivalently in Python 2. Always use this query instead of
`MatchQuery` when matching on BLOB values.
"""
def __init__(self, field, pattern):
super(BytesQuery, self).__init__(field, pattern)
# Use a buffer representation of the pattern for SQLite
# Use a buffer/memoryview representation of the pattern for SQLite
# matching. This instructs SQLite to treat the blob as binary
# rather than encoded Unicode.
if isinstance(self.pattern, basestring):
# Implicitly coerce Unicode strings to their bytes
# equivalents.
if isinstance(self.pattern, unicode):
self.pattern = self.pattern.encode('utf8')
if isinstance(self.pattern, (six.text_type, bytes)):
if isinstance(self.pattern, six.text_type):
self.pattern = self.pattern.encode('utf-8')
self.buf_pattern = buffer(self.pattern)
elif isinstance(self.pattern, buffer):
self.buf_pattern = self.pattern
@ -268,6 +281,7 @@ class NumericQuery(FieldQuery):
Raises InvalidQueryError when the pattern does not represent an int or
a float.
"""
def _convert(self, s):
"""Convert a string to a numeric type (float or int).
@ -283,7 +297,7 @@ class NumericQuery(FieldQuery):
try:
return float(s)
except ValueError:
raise InvalidQueryArgumentTypeError(s, u"an int or a float")
raise InvalidQueryArgumentValueError(s, u"an int or a float")
def __init__(self, field, pattern, fast=True):
super(NumericQuery, self).__init__(field, pattern, fast)
@ -304,7 +318,7 @@ class NumericQuery(FieldQuery):
if self.field not in item:
return False
value = item[self.field]
if isinstance(value, basestring):
if isinstance(value, six.string_types):
value = self._convert(value)
if self.point is not None:
@ -335,6 +349,7 @@ class CollectionQuery(Query):
"""An abstract query class that aggregates other queries. Can be
indexed like a list to access the sub-queries.
"""
def __init__(self, subqueries=()):
self.subqueries = subqueries
@ -387,6 +402,7 @@ class AnyFieldQuery(CollectionQuery):
any field. The individual field query class is provided to the
constructor.
"""
def __init__(self, pattern, fields, cls):
self.pattern = pattern
self.fields = fields
@ -422,6 +438,7 @@ class MutableCollectionQuery(CollectionQuery):
"""A collection query whose subqueries may be modified after the
query is initialized.
"""
def __setitem__(self, key, value):
self.subqueries[key] = value
@ -431,6 +448,7 @@ class MutableCollectionQuery(CollectionQuery):
class AndQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self):
return self.clause_with_joiner('and')
@ -440,6 +458,7 @@ class AndQuery(MutableCollectionQuery):
class OrQuery(MutableCollectionQuery):
"""A conjunction of a list of other queries."""
def clause(self):
return self.clause_with_joiner('or')
@ -451,6 +470,7 @@ class NotQuery(Query):
"""A query that matches the negation of its `subquery`, as a shorcut for
performing `not(subquery)` without using regular expressions.
"""
def __init__(self, subquery):
self.subquery = subquery
@ -479,6 +499,7 @@ class NotQuery(Query):
class TrueQuery(Query):
"""A query that always matches."""
def clause(self):
return '1', ()
@ -488,6 +509,7 @@ class TrueQuery(Query):
class FalseQuery(Query):
"""A query that never matches."""
def clause(self):
return '0', ()
@ -501,6 +523,10 @@ def _to_epoch_time(date):
"""Convert a `datetime` object to an integer number of seconds since
the (local) Unix epoch.
"""
if hasattr(date, 'timestamp'):
# The `timestamp` method exists on Python 3.3+.
return int(date.timestamp())
else:
epoch = datetime.fromtimestamp(0)
delta = date - epoch
return int(delta.total_seconds())
@ -527,12 +553,23 @@ class Period(object):
instants of time during January 2014.
"""
precisions = ('year', 'month', 'day')
date_formats = ('%Y', '%Y-%m', '%Y-%m-%d')
precisions = ('year', 'month', 'day', 'hour', 'minute', 'second')
date_formats = (
('%Y',), # year
('%Y-%m',), # month
('%Y-%m-%d',), # day
('%Y-%m-%dT%H', '%Y-%m-%d %H'), # hour
('%Y-%m-%dT%H:%M', '%Y-%m-%d %H:%M'), # minute
('%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S') # second
)
relative_units = {'y': 365, 'm': 30, 'w': 7, 'd': 1}
relative_re = '(?P<sign>[+|-]?)(?P<quantity>[0-9]+)' + \
'(?P<timespan>[y|m|w|d])'
def __init__(self, date, precision):
"""Create a period with the given date (a `datetime` object) and
precision (a string, one of "year", "month", or "day").
precision (a string, one of "year", "month", "day", "hour", "minute",
or "second").
"""
if precision not in Period.precisions:
raise ValueError(u'Invalid precision {0}'.format(precision))
@ -542,20 +579,55 @@ class Period(object):
@classmethod
def parse(cls, string):
"""Parse a date and return a `Period` object or `None` if the
string is empty.
string is empty, or raise an InvalidQueryArgumentValueError if
the string cannot be parsed to a date.
The date may be absolute or relative. Absolute dates look like
`YYYY`, or `YYYY-MM-DD`, or `YYYY-MM-DD HH:MM:SS`, etc. Relative
dates have three parts:
- Optionally, a ``+`` or ``-`` sign indicating the future or the
past. The default is the future.
- A number: how much to add or subtract.
- A letter indicating the unit: days, weeks, months or years
(``d``, ``w``, ``m`` or ``y``). A "month" is exactly 30 days
and a "year" is exactly 365 days.
"""
if not string:
return None
ordinal = string.count('-')
if ordinal >= len(cls.date_formats):
# Too many components.
return None
date_format = cls.date_formats[ordinal]
def find_date_and_format(string):
for ord, format in enumerate(cls.date_formats):
for format_option in format:
try:
date = datetime.strptime(string, date_format)
date = datetime.strptime(string, format_option)
return date, ord
except ValueError:
# Parsing failed.
pass
return (None, None)
if not string:
return None
# Check for a relative date.
match_dq = re.match(cls.relative_re, string)
if match_dq:
sign = match_dq.group('sign')
quantity = match_dq.group('quantity')
timespan = match_dq.group('timespan')
# Add or subtract the given amount of time from the current
# date.
multiplier = -1 if sign == '-' else 1
days = cls.relative_units[timespan]
date = datetime.now() + \
timedelta(days=int(quantity) * days) * multiplier
return cls(date, cls.precisions[5])
# Check for an absolute date.
date, ordinal = find_date_and_format(string)
if date is None:
raise InvalidQueryArgumentValueError(string,
'a valid date/time string')
precision = cls.precisions[ordinal]
return cls(date, precision)
@ -574,6 +646,12 @@ class Period(object):
return date.replace(year=date.year + 1, month=1)
elif 'day' == precision:
return date + timedelta(days=1)
elif 'hour' == precision:
return date + timedelta(hours=1)
elif 'minute' == precision:
return date + timedelta(minutes=1)
elif 'second' == precision:
return date + timedelta(seconds=1)
else:
raise ValueError(u'unhandled precision {0}'.format(precision))
@ -620,14 +698,17 @@ class DateQuery(FieldQuery):
The value of a date field can be matched against a date interval by
using an ellipsis interval syntax similar to that of NumericQuery.
"""
def __init__(self, field, pattern, fast=True):
super(DateQuery, self).__init__(field, pattern, fast)
start, end = _parse_periods(pattern)
self.interval = DateInterval.from_periods(start, end)
def match(self, item):
if self.field not in item:
return False
timestamp = float(item[self.field])
date = datetime.utcfromtimestamp(timestamp)
date = datetime.fromtimestamp(timestamp)
return self.interval.contains(date)
_clause_tmpl = "{0} {1} ?"
@ -661,6 +742,7 @@ class DurationQuery(NumericQuery):
Raises InvalidQueryError when the pattern does not represent an int, float
or M:SS time interval.
"""
def _convert(self, s):
"""Convert a M:SS or numeric string to a float.
@ -675,7 +757,7 @@ class DurationQuery(NumericQuery):
try:
return float(s)
except ValueError:
raise InvalidQueryArgumentTypeError(
raise InvalidQueryArgumentValueError(
s,
u"a M:SS string or a float")
@ -783,6 +865,7 @@ class FieldSort(Sort):
"""An abstract sort criterion that orders by a specific field (of
any kind).
"""
def __init__(self, field, ascending=True, case_insensitive=True):
self.field = field
self.ascending = ascending
@ -795,7 +878,7 @@ class FieldSort(Sort):
def key(item):
field_val = item.get(self.field, '')
if self.case_insensitive and isinstance(field_val, unicode):
if self.case_insensitive and isinstance(field_val, six.text_type):
field_val = field_val.lower()
return field_val
@ -820,6 +903,7 @@ class FieldSort(Sort):
class FixedFieldSort(FieldSort):
"""Sort object to sort on a fixed field.
"""
def order_clause(self):
order = "ASC" if self.ascending else "DESC"
if self.case_insensitive:
@ -836,12 +920,14 @@ class SlowFieldSort(FieldSort):
"""A sort criterion by some model field other than a fixed field:
i.e., a computed or flexible field.
"""
def is_slow(self):
return True
class NullSort(Sort):
"""No sorting. Leave results unsorted."""
def sort(self, items):
return items

View file

@ -19,6 +19,10 @@ from __future__ import division, absolute_import, print_function
from . import query
from beets.util import str2bool
import six
if not six.PY2:
buffer = memoryview # sqlite won't accept memoryview in python 2
# Abstract base.
@ -37,7 +41,7 @@ class Type(object):
"""The `Query` subclass to be used when querying the field.
"""
model_type = unicode
model_type = six.text_type
"""The Python type that is used to represent the value in the model.
The model is guaranteed to return a value of this type if the field
@ -61,9 +65,9 @@ class Type(object):
if value is None:
value = u''
if isinstance(value, bytes):
value = value.decode('utf8', 'ignore')
value = value.decode('utf-8', 'ignore')
return unicode(value)
return six.text_type(value)
def parse(self, string):
"""Parse a (possibly human-written) string and return the
@ -97,12 +101,12 @@ class Type(object):
https://docs.python.org/2/library/sqlite3.html#sqlite-and-python-types
Flexible fields have the type affinity `TEXT`. This means the
`sql_value` is either a `buffer` or a `unicode` object` and the
method must handle these in addition.
`sql_value` is either a `buffer`/`memoryview` or a `unicode` object`
and the method must handle these in addition.
"""
if isinstance(sql_value, buffer):
sql_value = bytes(sql_value).decode('utf8', 'ignore')
if isinstance(sql_value, unicode):
sql_value = bytes(sql_value).decode('utf-8', 'ignore')
if isinstance(sql_value, six.text_type):
return self.parse(sql_value)
else:
return self.normalize(sql_value)
@ -194,7 +198,7 @@ class Boolean(Type):
model_type = bool
def format(self, value):
return unicode(bool(value))
return six.text_type(bool(value))
def parse(self, string):
return str2bool(string)

View file

@ -37,14 +37,13 @@ from beets import dbcore
from beets import plugins
from beets import util
from beets import config
from beets.util import pipeline, sorted_walk, ancestry
from beets.util import pipeline, sorted_walk, ancestry, MoveOperation
from beets.util import syspath, normpath, displayable_path
from enum import Enum
from beets import mediafile
action = Enum('action',
['SKIP', 'ASIS', 'TRACKS', 'MANUAL', 'APPLY', 'MANUAL_ID',
'ALBUMS', 'RETAG'])
['SKIP', 'ASIS', 'TRACKS', 'APPLY', 'ALBUMS', 'RETAG'])
# The RETAG action represents "don't apply any match, but do record
# new metadata". It's not reachable via the standard command prompt but
# can be used by plugins.
@ -69,7 +68,7 @@ class ImportAbort(Exception):
def _open_state():
"""Reads the state file, returning a dictionary."""
try:
with open(config['statefile'].as_filename()) as f:
with open(config['statefile'].as_filename(), 'rb') as f:
return pickle.load(f)
except Exception as exc:
# The `pickle` module can emit all sorts of exceptions during
@ -83,7 +82,7 @@ def _open_state():
def _save_state(state):
"""Writes the state dictionary out to disk."""
try:
with open(config['statefile'].as_filename(), 'w') as f:
with open(config['statefile'].as_filename(), 'wb') as f:
pickle.dump(state, f)
except IOError as exc:
log.error(u'state file could not be written: {0}', exc)
@ -189,6 +188,8 @@ class ImportSession(object):
self.paths = paths
self.query = query
self._is_resuming = dict()
self._merged_items = set()
self._merged_dirs = set()
# Normalize the paths.
if self.paths:
@ -221,13 +222,19 @@ class ImportSession(object):
iconfig['resume'] = False
iconfig['incremental'] = False
# Copy, move, and link are mutually exclusive.
# Copy, move, link, and hardlink are mutually exclusive.
if iconfig['move']:
iconfig['copy'] = False
iconfig['link'] = False
iconfig['hardlink'] = False
elif iconfig['link']:
iconfig['copy'] = False
iconfig['move'] = False
iconfig['hardlink'] = False
elif iconfig['hardlink']:
iconfig['copy'] = False
iconfig['move'] = False
iconfig['link'] = False
# Only delete when copying.
if not iconfig['copy']:
@ -306,6 +313,8 @@ class ImportSession(object):
stages += [import_asis(self)]
# Plugin stages.
for stage_func in plugins.early_import_stages():
stages.append(plugin_stage(self, stage_func))
for stage_func in plugins.import_stages():
stages.append(plugin_stage(self, stage_func))
@ -331,7 +340,7 @@ class ImportSession(object):
been imported in a previous session.
"""
if self.is_resuming(toppath) \
and all(map(lambda p: progress_element(toppath, p), paths)):
and all([progress_element(toppath, p) for p in paths]):
return True
if self.config['incremental'] \
and tuple(paths) in self.history_dirs:
@ -345,6 +354,24 @@ class ImportSession(object):
self._history_dirs = history_get()
return self._history_dirs
def already_merged(self, paths):
"""Returns true if all the paths being imported were part of a merge
during previous tasks.
"""
for path in paths:
if path not in self._merged_items \
and path not in self._merged_dirs:
return False
return True
def mark_merged(self, paths):
"""Mark paths and directories as merged for future reimport tasks.
"""
self._merged_items.update(paths)
dirs = set([os.path.dirname(path) if os.path.isfile(path) else path
for path in paths])
self._merged_dirs.update(dirs)
def is_resuming(self, toppath):
"""Return `True` if user wants to resume import of this path.
@ -362,7 +389,7 @@ class ImportSession(object):
# Either accept immediately or prompt for input to decide.
if self.want_resume is True or \
self.should_resume(toppath):
log.warn(u'Resuming interrupted import of {0}',
log.warning(u'Resuming interrupted import of {0}',
util.displayable_path(toppath))
self._is_resuming[toppath] = True
else:
@ -424,6 +451,9 @@ class ImportTask(BaseImportTask):
* `manipulate_files()` Copy, move, and write files depending on the
session configuration.
* `set_fields()` Sets the fields given at CLI or configuration to
the specified values.
* `finalize()` Update the import progress and cleanup the file
system.
"""
@ -435,6 +465,7 @@ class ImportTask(BaseImportTask):
self.candidates = []
self.rec = None
self.should_remove_duplicates = False
self.should_merge_duplicates = False
self.is_album = True
self.search_ids = [] # user-supplied candidate IDs.
@ -443,7 +474,6 @@ class ImportTask(BaseImportTask):
indicates that an action has been selected for this task.
"""
# Not part of the task structure:
assert choice not in (action.MANUAL, action.MANUAL_ID)
assert choice != action.APPLY # Only used internally.
if choice in (action.SKIP, action.ASIS, action.TRACKS, action.ALBUMS,
action.RETAG):
@ -499,13 +529,17 @@ class ImportTask(BaseImportTask):
if self.choice_flag in (action.ASIS, action.RETAG):
return list(self.items)
elif self.choice_flag == action.APPLY:
return self.match.mapping.keys()
return list(self.match.mapping.keys())
else:
assert False
def apply_metadata(self):
"""Copy metadata from match info to the items.
"""
if config['import']['from_scratch']:
for item in self.match.mapping:
item.clear()
autotag.apply_metadata(self.match.info, self.match.mapping)
def duplicate_items(self, lib):
@ -526,13 +560,29 @@ class ImportTask(BaseImportTask):
util.prune_dirs(os.path.dirname(item.path),
lib.directory)
def set_fields(self):
"""Sets the fields given at CLI or configuration to the specified
values.
"""
for field, view in config['import']['set_fields'].items():
value = view.get()
log.debug(u'Set field {1}={2} for {0}',
displayable_path(self.paths),
field,
value)
self.album[field] = value
self.album.store()
def finalize(self, session):
"""Save progress, clean up files, and emit plugin event.
"""
# Update progress.
if session.want_resume:
self.save_progress()
if session.config['incremental']:
if session.config['incremental'] and not (
# Should we skip recording to incremental list?
self.skip and session.config['incremental_skip_later']
):
self.save_history()
self.cleanup(copy=session.config['copy'],
@ -587,12 +637,12 @@ class ImportTask(BaseImportTask):
candidate IDs are stored in self.search_ids: if present, the
initial lookup is restricted to only those IDs.
"""
artist, album, candidates, recommendation = \
artist, album, prop = \
autotag.tag_album(self.items, search_ids=self.search_ids)
self.cur_artist = artist
self.cur_album = album
self.candidates = candidates
self.rec = recommendation
self.candidates = prop.candidates
self.rec = prop.recommendation
def find_duplicates(self, lib):
"""Return a list of albums from `lib` with the same artist and
@ -612,10 +662,11 @@ class ImportTask(BaseImportTask):
))
for album in lib.albums(duplicate_query):
# Check whether the album is identical in contents, in which
# case it is not a duplicate (will be replaced).
# Check whether the album paths are all present in the task
# i.e. album is being completely re-imported by the task,
# in which case it is not a duplicate (will be replaced).
album_paths = set(i.path for i in album.items())
if album_paths != task_paths:
if not (album_paths <= task_paths):
duplicates.append(album)
return duplicates
@ -640,7 +691,7 @@ class ImportTask(BaseImportTask):
changes['comp'] = False
else:
# VA.
changes['albumartist'] = config['va_name'].get(unicode)
changes['albumartist'] = config['va_name'].as_str()
changes['comp'] = True
elif self.choice_flag in (action.APPLY, action.RETAG):
@ -655,20 +706,28 @@ class ImportTask(BaseImportTask):
for item in self.items:
item.update(changes)
def manipulate_files(self, move=False, copy=False, write=False,
link=False, session=None):
def manipulate_files(self, operation=None, write=False, session=None):
""" Copy, move, link or hardlink (depending on `operation`) the files
as well as write metadata.
`operation` should be an instance of `util.MoveOperation`.
If `write` is `True` metadata is written to the files.
"""
items = self.imported_items()
# Save the original paths of all items for deletion and pruning
# in the next step (finalization).
self.old_paths = [item.path for item in items]
for item in items:
if move or copy or link:
if operation is not None:
# In copy and link modes, treat re-imports specially:
# move in-library files. (Out-of-library files are
# copied/moved as usual).
old_path = item.path
if (copy or link) and self.replaced_items[item] and \
session.lib.directory in util.ancestry(old_path):
if (operation != MoveOperation.MOVE
and self.replaced_items[item]
and session.lib.directory in util.ancestry(old_path)):
item.move()
# We moved the item, so remove the
# now-nonexistent file from old_paths.
@ -676,7 +735,7 @@ class ImportTask(BaseImportTask):
else:
# A normal import. Just copy files and keep track of
# old paths.
item.move(copy, link)
item.move(operation)
if write and (self.apply or self.choice_flag == action.RETAG):
item.try_write()
@ -830,10 +889,9 @@ class SingletonImportTask(ImportTask):
plugins.send('item_imported', lib=lib, item=item)
def lookup_candidates(self):
candidates, recommendation = autotag.tag_item(
self.item, search_ids=self.search_ids)
self.candidates = candidates
self.rec = recommendation
prop = autotag.tag_item(self.item, search_ids=self.search_ids)
self.candidates = prop.candidates
self.rec = prop.recommendation
def find_duplicates(self, lib):
"""Return a list of items from `lib` that have the same artist
@ -874,6 +932,19 @@ class SingletonImportTask(ImportTask):
def reload(self):
self.item.load()
def set_fields(self):
"""Sets the fields given at CLI or configuration to the specified
values.
"""
for field, view in config['import']['set_fields'].items():
value = view.get()
log.debug(u'Set field {1}={2} for {0}',
displayable_path(self.paths),
field,
value)
self.item[field] = value
self.item.store()
# FIXME The inheritance relationships are inverted. This is why there
# are so many methods which pass. More responsibility should be delegated to
@ -944,7 +1015,7 @@ class ArchiveImportTask(SentinelImportTask):
return False
for path_test, _ in cls.handlers():
if path_test(path):
if path_test(util.py3_path(path)):
return True
return False
@ -985,12 +1056,12 @@ class ArchiveImportTask(SentinelImportTask):
`toppath` to that directory.
"""
for path_test, handler_class in self.handlers():
if path_test(self.toppath):
if path_test(util.py3_path(self.toppath)):
break
try:
extract_to = mkdtemp()
archive = handler_class(self.toppath, mode='r')
archive = handler_class(util.py3_path(self.toppath), mode='r')
archive.extractall(extract_to)
finally:
archive.close()
@ -1148,7 +1219,7 @@ class ImportTaskFactory(object):
if not (self.session.config['move'] or
self.session.config['copy']):
log.warn(u"Archive importing requires either "
log.warning(u"Archive importing requires either "
u"'copy' or 'move' to be enabled.")
return
@ -1179,12 +1250,33 @@ class ImportTaskFactory(object):
# Silently ignore non-music files.
pass
elif isinstance(exc.reason, mediafile.UnreadableFileError):
log.warn(u'unreadable file: {0}', displayable_path(path))
log.warning(u'unreadable file: {0}', displayable_path(path))
else:
log.error(u'error reading {0}: {1}',
displayable_path(path), exc)
# Pipeline utilities
def _freshen_items(items):
# Clear IDs from re-tagged items so they appear "fresh" when
# we add them back to the library.
for item in items:
item.id = None
item.album_id = None
def _extend_pipeline(tasks, *stages):
# Return pipeline extension for stages with list of tasks
if type(tasks) == list:
task_iter = iter(tasks)
else:
task_iter = tasks
ipl = pipeline.Pipeline([task_iter] + list(stages))
return pipeline.multiple(ipl.pull())
# Full-album pipeline stages.
def read_tasks(session):
@ -1204,7 +1296,7 @@ def read_tasks(session):
skipped += task_factory.skipped
if not task_factory.imported:
log.warn(u'No files imported from {0}',
log.warning(u'No files imported from {0}',
displayable_path(toppath))
# Show skipped directories (due to incremental/resume).
@ -1230,12 +1322,7 @@ def query_tasks(session):
log.debug(u'yielding album {0}: {1} - {2}',
album.id, album.albumartist, album.album)
items = list(album.items())
# Clear IDs from re-tagged items so they appear "fresh" when
# we add them back to the library.
for item in items:
item.id = None
item.album_id = None
_freshen_items(items)
task = ImportTask(None, [album.item_dir()], items)
for task in task.handle_created(session):
@ -1281,6 +1368,9 @@ def user_query(session, task):
if task.skip:
return task
if session.already_merged(task.paths):
return pipeline.BUBBLE
# Ask the user for a choice.
task.choose_match(session)
plugins.send('import_task_choice', session=session, task=task)
@ -1295,24 +1385,38 @@ def user_query(session, task):
yield new_task
yield SentinelImportTask(task.toppath, task.paths)
ipl = pipeline.Pipeline([
emitter(task),
return _extend_pipeline(emitter(task),
lookup_candidates(session),
user_query(session),
])
return pipeline.multiple(ipl.pull())
user_query(session))
# As albums: group items by albums and create task for each album
if task.choice_flag is action.ALBUMS:
ipl = pipeline.Pipeline([
iter([task]),
return _extend_pipeline([task],
group_albums(session),
lookup_candidates(session),
user_query(session)
])
return pipeline.multiple(ipl.pull())
user_query(session))
resolve_duplicates(session, task)
if task.should_merge_duplicates:
# Create a new task for tagging the current items
# and duplicates together
duplicate_items = task.duplicate_items(session.lib)
# Duplicates would be reimported so make them look "fresh"
_freshen_items(duplicate_items)
duplicate_paths = [item.path for item in duplicate_items]
# Record merged paths in the session so they are not reimported
session.mark_merged(duplicate_paths)
merged_task = ImportTask(None, task.paths + duplicate_paths,
task.items + duplicate_items)
return _extend_pipeline([merged_task],
lookup_candidates(session),
user_query(session))
apply_choice(session, task)
return task
@ -1327,7 +1431,33 @@ def resolve_duplicates(session, task):
log.debug(u'found duplicates: {}'.format(
[o.id for o in found_duplicates]
))
# Get the default action to follow from config.
duplicate_action = config['import']['duplicate_action'].as_choice({
u'skip': u's',
u'keep': u'k',
u'remove': u'r',
u'merge': u'm',
u'ask': u'a',
})
log.debug(u'default action for duplicates: {0}', duplicate_action)
if duplicate_action == u's':
# Skip new.
task.set_choice(action.SKIP)
elif duplicate_action == u'k':
# Keep both. Do nothing; leave the choice intact.
pass
elif duplicate_action == u'r':
# Remove old.
task.should_remove_duplicates = True
elif duplicate_action == u'm':
# Merge duplicates together
task.should_merge_duplicates = True
else:
# No default action set; ask the session.
session.resolve_duplicate(task, found_duplicates)
session.log_choice(task, True)
@ -1360,6 +1490,14 @@ def apply_choice(session, task):
task.add(session.lib)
# If ``set_fields`` is set, set those fields to the
# configured values.
# NOTE: This cannot be done before the ``task.add()`` call above,
# because then the ``ImportTask`` won't have an `album` for which
# it can set the fields.
if config['import']['set_fields']:
task.set_fields()
@pipeline.mutator_stage
def plugin_stage(session, func, task):
@ -1388,11 +1526,20 @@ def manipulate_files(session, task):
if task.should_remove_duplicates:
task.remove_duplicates(session.lib)
if session.config['move']:
operation = MoveOperation.MOVE
elif session.config['copy']:
operation = MoveOperation.COPY
elif session.config['link']:
operation = MoveOperation.LINK
elif session.config['hardlink']:
operation = MoveOperation.HARDLINK
else:
operation = None
task.manipulate_files(
move=session.config['move'],
copy=session.config['copy'],
operation,
write=session.config['write'],
link=session.config['link'],
session=session,
)
@ -1439,8 +1586,16 @@ def group_albums(session):
task = pipeline.multiple(tasks)
MULTIDISC_MARKERS = (r'dis[ck]', r'cd')
MULTIDISC_PAT_FMT = r'^(.*%s[\W_]*)\d'
MULTIDISC_MARKERS = (br'dis[ck]', br'cd')
MULTIDISC_PAT_FMT = br'^(.*%s[\W_]*)\d'
def is_subdir_of_any_in_list(path, dirs):
"""Returns True if path os a subdirectory of any directory in dirs
(a list). In other case, returns False.
"""
ancestors = ancestry(path)
return any(d in ancestors for d in dirs)
def albums_in_dir(path):
@ -1462,7 +1617,7 @@ def albums_in_dir(path):
# and add the current directory. If so, just add the directory
# and move on to the next directory. If not, stop collapsing.
if collapse_paths:
if (not collapse_pat and collapse_paths[0] in ancestry(root)) or \
if (is_subdir_of_any_in_list(root, collapse_paths)) or \
(collapse_pat and
collapse_pat.match(os.path.basename(root))):
# Still collapsing.
@ -1483,7 +1638,9 @@ def albums_in_dir(path):
# named in this way.
start_collapsing = False
for marker in MULTIDISC_MARKERS:
marker_pat = re.compile(MULTIDISC_PAT_FMT % marker, re.I)
# We're using replace on %s due to lack of .format() on bytestrings
p = MULTIDISC_PAT_FMT.replace(b'%s', marker)
marker_pat = re.compile(p, re.I)
match = marker_pat.match(os.path.basename(root))
# Is this directory the root of a nested multi-disc album?
@ -1492,13 +1649,16 @@ def albums_in_dir(path):
start_collapsing = True
subdir_pat = None
for subdir in dirs:
subdir = util.bytestring_path(subdir)
# The first directory dictates the pattern for
# the remaining directories.
if not subdir_pat:
match = marker_pat.match(subdir)
if match:
match_group = re.escape(match.group(1))
subdir_pat = re.compile(
br'^%s\d' % re.escape(match.group(1)), re.I
b''.join([b'^', match_group, br'\d']),
re.I
)
else:
start_collapsing = False
@ -1520,7 +1680,8 @@ def albums_in_dir(path):
# Set the current pattern to match directories with the same
# prefix as this one, followed by a digit.
collapse_pat = re.compile(
br'^%s\d' % re.escape(match.group(1)), re.I
b''.join([b'^', re.escape(match.group(1)), br'\d']),
re.I
)
break

View file

@ -22,18 +22,27 @@ import sys
import unicodedata
import time
import re
from unidecode import unidecode
import six
from beets import logging
from beets.mediafile import MediaFile, MutagenError, UnreadableFileError
from beets.mediafile import MediaFile, UnreadableFileError
from beets import plugins
from beets import util
from beets.util import bytestring_path, syspath, normpath, samefile
from beets.util import bytestring_path, syspath, normpath, samefile, \
MoveOperation
from beets.util.functemplate import Template
from beets import dbcore
from beets.dbcore import types
import beets
# To use the SQLite "blob" type, it doesn't suffice to provide a byte
# string; SQLite treats that as encoded text. Wrapping it in a `buffer` or a
# `memoryview`, depending on the Python version, tells it that we
# actually mean non-text data.
if six.PY2:
BLOB_TYPE = buffer # noqa: F821
else:
BLOB_TYPE = memoryview
log = logging.getLogger('beets')
@ -48,9 +57,6 @@ class PathQuery(dbcore.FieldQuery):
and case-sensitive otherwise.
"""
escape_re = re.compile(r'[\\_%]')
escape_char = b'\\'
def __init__(self, field, pattern, fast=True, case_sensitive=None):
"""Create a path query. `pattern` must be a path, either to a
file or a directory.
@ -85,28 +91,31 @@ class PathQuery(dbcore.FieldQuery):
colon = query_part.find(':')
if colon != -1:
query_part = query_part[:colon]
return (os.sep in query_part and
os.path.exists(syspath(normpath(query_part))))
# Test both `sep` and `altsep` (i.e., both slash and backslash on
# Windows).
return (
(os.sep in query_part or
(os.altsep and os.altsep in query_part)) and
os.path.exists(syspath(normpath(query_part)))
)
def match(self, item):
path = item.path if self.case_sensitive else item.path.lower()
return (path == self.file_path) or path.startswith(self.dir_path)
def col_clause(self):
if self.case_sensitive:
file_blob = buffer(self.file_path)
dir_blob = buffer(self.dir_path)
return '({0} = ?) || (substr({0}, 1, ?) = ?)'.format(self.field), \
(file_blob, len(dir_blob), dir_blob)
file_blob = BLOB_TYPE(self.file_path)
dir_blob = BLOB_TYPE(self.dir_path)
escape = lambda m: self.escape_char + m.group(0)
dir_pattern = self.escape_re.sub(escape, self.dir_path)
dir_blob = buffer(dir_pattern + b'%')
file_pattern = self.escape_re.sub(escape, self.file_path)
file_blob = buffer(file_pattern)
return '({0} LIKE ? ESCAPE ?) || ({0} LIKE ? ESCAPE ?)'.format(
self.field), (file_blob, self.escape_char, dir_blob,
self.escape_char)
if self.case_sensitive:
query_part = '({0} = ?) || (substr({0}, 1, ?) = ?)'
else:
query_part = '(BYTELOWER({0}) = BYTELOWER(?)) || \
(substr(BYTELOWER({0}), 1, ?) = BYTELOWER(?))'
return query_part.format(self.field), \
(file_blob, len(dir_blob), dir_blob)
# Library-specific field types.
@ -117,14 +126,15 @@ class DateType(types.Float):
query = dbcore.query.DateQuery
def format(self, value):
return time.strftime(beets.config['time_format'].get(unicode),
return time.strftime(beets.config['time_format'].as_str(),
time.localtime(value or 0))
def parse(self, string):
try:
# Try a formatted date string.
return time.mktime(
time.strptime(string, beets.config['time_format'].get(unicode))
time.strptime(string,
beets.config['time_format'].as_str())
)
except ValueError:
# Fall back to a plain timestamp number.
@ -135,10 +145,27 @@ class DateType(types.Float):
class PathType(types.Type):
"""A dbcore type for filesystem paths. These are represented as
`bytes` objects, in keeping with the Unix filesystem abstraction.
"""
sql = u'BLOB'
query = PathQuery
model_type = bytes
def __init__(self, nullable=False):
"""Create a path type object. `nullable` controls whether the
type may be missing, i.e., None.
"""
self.nullable = nullable
@property
def null(self):
if self.nullable:
return None
else:
return b''
def format(self, value):
return util.displayable_path(value)
@ -146,12 +173,11 @@ class PathType(types.Type):
return normpath(bytestring_path(string))
def normalize(self, value):
if isinstance(value, unicode):
if isinstance(value, six.text_type):
# Paths stored internally as encoded bytes.
return bytestring_path(value)
elif isinstance(value, buffer):
# SQLite must store bytestings as buffers to avoid decoding.
elif isinstance(value, BLOB_TYPE):
# We unwrap buffers to bytes.
return bytes(value)
@ -163,7 +189,7 @@ class PathType(types.Type):
def to_sql(self, value):
if isinstance(value, bytes):
value = buffer(value)
value = BLOB_TYPE(value)
return value
@ -180,6 +206,8 @@ class MusicalKey(types.String):
r'bb': 'a#',
}
null = None
def parse(self, key):
key = key.lower()
for flat, sharp in self.ENHARMONIC.items():
@ -254,7 +282,7 @@ PF_KEY_DEFAULT = 'default'
# Exceptions.
@six.python_2_unicode_compatible
class FileOperationError(Exception):
"""Indicates an error when interacting with a file on disk.
Possibilities include an unsupported media type, a permissions
@ -268,35 +296,39 @@ class FileOperationError(Exception):
self.path = path
self.reason = reason
def __unicode__(self):
def text(self):
"""Get a string representing the error. Describes both the
underlying reason and the file path in question.
"""
return u'{0}: {1}'.format(
util.displayable_path(self.path),
unicode(self.reason)
six.text_type(self.reason)
)
def __str__(self):
return unicode(self).encode('utf8')
# define __str__ as text to avoid infinite loop on super() calls
# with @six.python_2_unicode_compatible
__str__ = text
@six.python_2_unicode_compatible
class ReadError(FileOperationError):
"""An error while reading a file (i.e. in `Item.read`).
"""
def __unicode__(self):
return u'error reading ' + super(ReadError, self).__unicode__()
def __str__(self):
return u'error reading ' + super(ReadError, self).text()
@six.python_2_unicode_compatible
class WriteError(FileOperationError):
"""An error while writing a file (i.e. in `Item.write`).
"""
def __unicode__(self):
return u'error writing ' + super(WriteError, self).__unicode__()
def __str__(self):
return u'error writing ' + super(WriteError, self).text()
# Item and Album model classes.
@six.python_2_unicode_compatible
class LibModel(dbcore.Model):
"""Shared concrete functionality for Items and Albums.
"""
@ -310,8 +342,8 @@ class LibModel(dbcore.Model):
funcs.update(plugins.template_funcs())
return funcs
def store(self):
super(LibModel, self).store()
def store(self, fields=None):
super(LibModel, self).store(fields)
plugins.send('database_change', lib=self._db, model=self)
def remove(self):
@ -324,20 +356,16 @@ class LibModel(dbcore.Model):
def __format__(self, spec):
if not spec:
spec = beets.config[self._format_config_key].get(unicode)
result = self.evaluate_template(spec)
if isinstance(spec, bytes):
# if spec is a byte string then we must return a one as well
return result.encode('utf8')
else:
return result
spec = beets.config[self._format_config_key].as_str()
assert isinstance(spec, six.text_type)
return self.evaluate_template(spec)
def __str__(self):
return format(self).encode('utf8')
def __unicode__(self):
return format(self)
def __bytes__(self):
return self.__str__().encode('utf-8')
class FormattedItemMapping(dbcore.db.FormattedMapping):
"""Add lookup for album-level fields.
@ -407,7 +435,10 @@ class Item(LibModel):
'albumartist_sort': types.STRING,
'albumartist_credit': types.STRING,
'genre': types.STRING,
'lyricist': types.STRING,
'composer': types.STRING,
'composer_sort': types.STRING,
'arranger': types.STRING,
'grouping': types.STRING,
'year': types.PaddedInt(4),
'month': types.PaddedInt(2),
@ -424,6 +455,7 @@ class Item(LibModel):
'mb_albumid': types.STRING,
'mb_artistid': types.STRING,
'mb_albumartistid': types.STRING,
'mb_releasetrackid': types.STRING,
'albumtype': types.STRING,
'label': types.STRING,
'acoustid_fingerprint': types.STRING,
@ -443,6 +475,8 @@ class Item(LibModel):
'rg_track_peak': types.NULL_FLOAT,
'rg_album_gain': types.NULL_FLOAT,
'rg_album_peak': types.NULL_FLOAT,
'r128_track_gain': types.PaddedInt(6),
'r128_album_gain': types.PaddedInt(6),
'original_year': types.PaddedInt(4),
'original_month': types.PaddedInt(2),
'original_day': types.PaddedInt(2),
@ -510,15 +544,15 @@ class Item(LibModel):
"""
# Encode unicode paths and read buffers.
if key == 'path':
if isinstance(value, unicode):
if isinstance(value, six.text_type):
value = bytestring_path(value)
elif isinstance(value, buffer):
elif isinstance(value, BLOB_TYPE):
value = bytes(value)
if key in MediaFile.fields():
self.mtime = 0 # Reset mtime on dirty.
changed = super(Item, self)._setitem(key, value)
super(Item, self).__setitem__(key, value)
if changed and key in MediaFile.fields():
self.mtime = 0 # Reset mtime on dirty.
def update(self, values):
"""Set all key/value pairs in the mapping. If mtime is
@ -528,6 +562,11 @@ class Item(LibModel):
if self.mtime == 0 and 'mtime' in values:
self.mtime = values['mtime']
def clear(self):
"""Set all key/value pairs to None."""
for key in self._media_fields:
setattr(self, key, None)
def get_album(self):
"""Get the Album object that this item belongs to, if any, or
None if the item is a singleton or is not associated with a
@ -554,12 +593,12 @@ class Item(LibModel):
read_path = normpath(read_path)
try:
mediafile = MediaFile(syspath(read_path))
except (OSError, IOError, UnreadableFileError) as exc:
except UnreadableFileError as exc:
raise ReadError(read_path, exc)
for key in self._media_fields:
value = getattr(mediafile, key)
if isinstance(value, (int, long)):
if isinstance(value, six.integer_types):
if value.bit_length() > 63:
value = 0
self[key] = value
@ -601,14 +640,14 @@ class Item(LibModel):
try:
mediafile = MediaFile(syspath(path),
id3v23=beets.config['id3v23'].get(bool))
except (OSError, IOError, UnreadableFileError) as exc:
raise ReadError(self.path, exc)
except UnreadableFileError as exc:
raise ReadError(path, exc)
# Write the tags to the file.
mediafile.update(item_tags)
try:
mediafile.save()
except (OSError, IOError, MutagenError) as exc:
except UnreadableFileError as exc:
raise WriteError(self.path, exc)
# The file has a new mtime.
@ -653,27 +692,34 @@ class Item(LibModel):
# Files themselves.
def move_file(self, dest, copy=False, link=False):
"""Moves or copies the item's file, updating the path value if
the move succeeds. If a file exists at ``dest``, then it is
slightly modified to be unique.
def move_file(self, dest, operation=MoveOperation.MOVE):
"""Move, copy, link or hardlink the item's depending on `operation`,
updating the path value if the move succeeds.
If a file exists at `dest`, then it is slightly modified to be unique.
`operation` should be an instance of `util.MoveOperation`.
"""
if not util.samefile(self.path, dest):
dest = util.unique_path(dest)
if copy:
util.copy(self.path, dest)
plugins.send("item_copied", item=self, source=self.path,
destination=dest)
elif link:
util.link(self.path, dest)
plugins.send("item_linked", item=self, source=self.path,
destination=dest)
else:
if operation == MoveOperation.MOVE:
plugins.send("before_item_moved", item=self, source=self.path,
destination=dest)
util.move(self.path, dest)
plugins.send("item_moved", item=self, source=self.path,
destination=dest)
elif operation == MoveOperation.COPY:
util.copy(self.path, dest)
plugins.send("item_copied", item=self, source=self.path,
destination=dest)
elif operation == MoveOperation.LINK:
util.link(self.path, dest)
plugins.send("item_linked", item=self, source=self.path,
destination=dest)
elif operation == MoveOperation.HARDLINK:
util.hardlink(self.path, dest)
plugins.send("item_hardlinked", item=self, source=self.path,
destination=dest)
# Either copying or moving succeeded, so update the stored path.
self.path = dest
@ -720,26 +766,27 @@ class Item(LibModel):
self._db._memotable = {}
def move(self, copy=False, link=False, basedir=None, with_album=True):
def move(self, operation=MoveOperation.MOVE, basedir=None,
with_album=True, store=True):
"""Move the item to its designated location within the library
directory (provided by destination()). Subdirectories are
created as needed. If the operation succeeds, the item's path
field is updated to reflect the new location.
If `copy` is true, moving the file is copied rather than moved.
Similarly, `link` creates a symlink instead.
Instead of moving the item it can also be copied, linked or hardlinked
depending on `operation` which should be an instance of
`util.MoveOperation`.
basedir overrides the library base directory for the
destination.
`basedir` overrides the library base directory for the destination.
If the item is in an album, the album is given an opportunity to
move its art. (This can be disabled by passing
with_album=False.)
If the item is in an album and `with_album` is `True`, the album is
given an opportunity to move its art.
The item is stored to the database if it is in the database, so
any dirty fields prior to the move() call will be written as a
side effect. You probably want to call save() to commit the DB
transaction.
By default, the item is stored to the database if it is in the
database, so any dirty fields prior to the move() call will be written
as a side effect.
If `store` is `False` however, the item won't be stored and you'll
have to manually store it after invoking this method.
"""
self._check_db()
dest = self.destination(basedir=basedir)
@ -749,18 +796,20 @@ class Item(LibModel):
# Perform the move and store the change.
old_path = self.path
self.move_file(dest, copy, link)
self.move_file(dest, operation)
if store:
self.store()
# If this item is in an album, move its art.
if with_album:
album = self.get_album()
if album:
album.move_art(copy)
album.move_art(operation)
if store:
album.store()
# Prune vacated directory.
if not copy:
if operation == MoveOperation.MOVE:
util.prune_dirs(os.path.dirname(old_path), self._db.directory)
# Templating.
@ -811,7 +860,10 @@ class Item(LibModel):
subpath = unicodedata.normalize('NFC', subpath)
if beets.config['asciify_paths']:
subpath = unidecode(subpath)
subpath = util.asciify_path(
subpath,
beets.config['path_sep_replace'].as_str()
)
maxlen = beets.config['max_filename_length'].get(int)
if not maxlen:
@ -833,7 +885,7 @@ class Item(LibModel):
)
if fragment:
return subpath
return util.as_string(subpath)
else:
return normpath(os.path.join(basedir, subpath))
@ -848,7 +900,7 @@ class Album(LibModel):
_always_dirty = True
_fields = {
'id': types.PRIMARY_ID,
'artpath': PathType(),
'artpath': PathType(True),
'added': DateType(),
'albumartist': types.STRING,
@ -875,6 +927,7 @@ class Album(LibModel):
'albumdisambig': types.STRING,
'rg_album_gain': types.NULL_FLOAT,
'rg_album_peak': types.NULL_FLOAT,
'r128_album_gain': types.PaddedInt(6),
'original_year': types.PaddedInt(4),
'original_month': types.PaddedInt(2),
'original_day': types.PaddedInt(2),
@ -918,6 +971,7 @@ class Album(LibModel):
'albumdisambig',
'rg_album_gain',
'rg_album_peak',
'r128_album_gain',
'original_year',
'original_month',
'original_day',
@ -962,9 +1016,12 @@ class Album(LibModel):
for item in self.items():
item.remove(delete, False)
def move_art(self, copy=False, link=False):
"""Move or copy any existing album art so that it remains in the
same directory as the items.
def move_art(self, operation=MoveOperation.MOVE):
"""Move, copy, link or hardlink (depending on `operation`) any
existing album art so that it remains in the same directory as
the items.
`operation` should be an instance of `util.MoveOperation`.
"""
old_art = self.artpath
if not old_art:
@ -978,38 +1035,46 @@ class Album(LibModel):
log.debug(u'moving album art {0} to {1}',
util.displayable_path(old_art),
util.displayable_path(new_art))
if copy:
util.copy(old_art, new_art)
elif link:
util.link(old_art, new_art)
else:
if operation == MoveOperation.MOVE:
util.move(old_art, new_art)
util.prune_dirs(os.path.dirname(old_art), self._db.directory)
elif operation == MoveOperation.COPY:
util.copy(old_art, new_art)
elif operation == MoveOperation.LINK:
util.link(old_art, new_art)
elif operation == MoveOperation.HARDLINK:
util.hardlink(old_art, new_art)
self.artpath = new_art
# Prune old path when moving.
if not copy:
util.prune_dirs(os.path.dirname(old_art),
self._db.directory)
def move(self, operation=MoveOperation.MOVE, basedir=None, store=True):
"""Move, copy, link or hardlink (depending on `operation`)
all items to their destination. Any album art moves along with them.
def move(self, copy=False, link=False, basedir=None):
"""Moves (or copies) all items to their destination. Any album
art moves along with them. basedir overrides the library base
directory for the destination. The album is stored to the
database, persisting any modifications to its metadata.
`basedir` overrides the library base directory for the destination.
`operation` should be an instance of `util.MoveOperation`.
By default, the album is stored to the database, persisting any
modifications to its metadata. If `store` is `False` however,
the album is not stored automatically, and you'll have to manually
store it after invoking this method.
"""
basedir = basedir or self._db.directory
# Ensure new metadata is available to items for destination
# computation.
if store:
self.store()
# Move items.
items = list(self.items())
for item in items:
item.move(copy, link, basedir=basedir, with_album=False)
item.move(operation, basedir=basedir, with_album=False,
store=store)
# Move art.
self.move_art(copy, link)
self.move_art(operation)
if store:
self.store()
def item_dir(self):
@ -1054,10 +1119,14 @@ class Album(LibModel):
image = bytestring_path(image)
item_dir = item_dir or self.item_dir()
filename_tmpl = Template(beets.config['art_filename'].get(unicode))
filename_tmpl = Template(
beets.config['art_filename'].as_str())
subpath = self.evaluate_template(filename_tmpl, True)
if beets.config['asciify_paths']:
subpath = unidecode(subpath)
subpath = util.asciify_path(
subpath,
beets.config['path_sep_replace'].as_str()
)
subpath = util.sanitize_path(subpath,
replacements=self._db.replacements)
subpath = bytestring_path(subpath)
@ -1098,9 +1167,11 @@ class Album(LibModel):
plugins.send('art_set', album=self)
def store(self):
def store(self, fields=None):
"""Update the database with the album information. The album's
tracks are also updated.
:param fields: The fields to be stored. If not specified, all fields
will be.
"""
# Get modified track fields.
track_updates = {}
@ -1109,7 +1180,7 @@ class Album(LibModel):
track_updates[key] = self[key]
with self._db.transaction():
super(Album, self).store()
super(Album, self).store(fields)
if track_updates:
for item in self.items():
for key, value in track_updates.items():
@ -1172,7 +1243,8 @@ def parse_query_string(s, model_cls):
The string is split into components using shell-like syntax.
"""
assert isinstance(s, unicode), u"Query is not unicode: {0!r}".format(s)
message = u"Query is not unicode: {0!r}".format(s)
assert isinstance(s, six.text_type), message
try:
parts = util.shlex_split(s)
except ValueError as exc:
@ -1180,6 +1252,19 @@ def parse_query_string(s, model_cls):
return parse_query_parts(parts, model_cls)
def _sqlite_bytelower(bytestring):
""" A custom ``bytelower`` sqlite function so we can compare
bytestrings in a semi case insensitive fashion. This is to work
around sqlite builds are that compiled with
``-DSQLITE_LIKE_DOESNT_MATCH_BLOBS``. See
``https://github.com/beetbox/beets/issues/2172`` for details.
"""
if not six.PY2:
return bytestring.lower()
return buffer(bytes(bytestring).lower()) # noqa: F821
# The Library: interface to the database.
class Library(dbcore.Database):
@ -1192,9 +1277,8 @@ class Library(dbcore.Database):
path_formats=((PF_KEY_DEFAULT,
'$artist/$album/$track $title'),),
replacements=None):
if path != ':memory:':
self.path = bytestring_path(normpath(path))
super(Library, self).__init__(path)
timeout = beets.config['timeout'].as_number()
super(Library, self).__init__(path, timeout=timeout)
self.directory = bytestring_path(normpath(directory))
self.path_formats = path_formats
@ -1202,6 +1286,11 @@ class Library(dbcore.Database):
self._memotable = {} # Used for template substitution performance.
def _create_connection(self):
conn = super(Library, self)._create_connection()
conn.create_function('bytelower', 1, _sqlite_bytelower)
return conn
# Adding objects to the database.
def add(self, obj):
@ -1248,11 +1337,11 @@ class Library(dbcore.Database):
# Parse the query, if necessary.
try:
parsed_sort = None
if isinstance(query, basestring):
if isinstance(query, six.string_types):
query, parsed_sort = parse_query_string(query, model_cls)
elif isinstance(query, (list, tuple)):
query, parsed_sort = parse_query_parts(query, model_cls)
except dbcore.query.InvalidQueryArgumentTypeError as exc:
except dbcore.query.InvalidQueryArgumentValueError as exc:
raise dbcore.InvalidQueryError(query, exc)
# Any non-null sort specified by the parsed query overrides the
@ -1392,22 +1481,24 @@ class DefaultTemplateFunctions(object):
def tmpl_asciify(s):
"""Translate non-ASCII characters to their ASCII equivalents.
"""
return unidecode(s)
return util.asciify_path(s, beets.config['path_sep_replace'].as_str())
@staticmethod
def tmpl_time(s, fmt):
"""Format a time value using `strftime`.
"""
cur_fmt = beets.config['time_format'].get(unicode)
cur_fmt = beets.config['time_format'].as_str()
return time.strftime(fmt, time.strptime(s, cur_fmt))
def tmpl_aunique(self, keys=None, disam=None):
def tmpl_aunique(self, keys=None, disam=None, bracket=None):
"""Generate a string that is guaranteed to be unique among all
albums in the library who share the same set of keys. A fields
from "disam" is used in the string if one is sufficient to
disambiguate the albums. Otherwise, a fallback opaque value is
used. Both "keys" and "disam" should be given as
whitespace-separated lists of field names.
whitespace-separated lists of field names, while "bracket" is a
pair of characters to be used as brackets surrounding the
disambiguator or empty to have no brackets.
"""
# Fast paths: no album, no item or library, or memoized value.
if not self.item or not self.lib:
@ -1421,9 +1512,19 @@ class DefaultTemplateFunctions(object):
keys = keys or 'albumartist album'
disam = disam or 'albumtype year label catalognum albumdisambig'
if bracket is None:
bracket = '[]'
keys = keys.split()
disam = disam.split()
# Assign a left and right bracket or leave blank if argument is empty.
if len(bracket) == 2:
bracket_l = bracket[0]
bracket_r = bracket[1]
else:
bracket_l = u''
bracket_r = u''
album = self.lib.get_album(self.item)
if not album:
# Do nothing for singletons.
@ -1456,13 +1557,19 @@ class DefaultTemplateFunctions(object):
else:
# No disambiguator distinguished all fields.
res = u' {0}'.format(album.id)
res = u' {1}{0}{2}'.format(album.id, bracket_l, bracket_r)
self.lib._memotable[memokey] = res
return res
# Flatten disambiguation value into a string.
disam_value = album.formatted(True).get(disambiguator)
res = u' [{0}]'.format(disam_value)
# Return empty string if disambiguator is empty.
if disam_value:
res = u' {1}{0}{2}'.format(disam_value, bracket_l, bracket_r)
else:
res = u''
self.lib._memotable[memokey] = res
return res

View file

@ -27,6 +27,7 @@ from copy import copy
from logging import * # noqa
import subprocess
import threading
import six
def logsafe(val):
@ -42,7 +43,7 @@ def logsafe(val):
example.
"""
# Already Unicode.
if isinstance(val, unicode):
if isinstance(val, six.text_type):
return val
# Bytestring: needs decoding.
@ -51,16 +52,16 @@ def logsafe(val):
# (a) only do this for paths, if they can be given a distinct
# type, and (b) warn the developer if they do this for other
# bytestrings.
return val.decode('utf8', 'replace')
return val.decode('utf-8', 'replace')
# A "problem" object: needs a workaround.
elif isinstance(val, subprocess.CalledProcessError):
try:
return unicode(val)
return six.text_type(val)
except UnicodeDecodeError:
# An object with a broken __unicode__ formatter. Use __str__
# instead.
return str(val).decode('utf8', 'replace')
return str(val).decode('utf-8', 'replace')
# Other objects are used as-is so field access, etc., still works in
# the format string.

File diff suppressed because it is too large Load diff

View file

@ -27,6 +27,7 @@ from functools import wraps
import beets
from beets import logging
from beets import mediafile
import six
PLUGIN_NAMESPACE = 'beetsplug'
@ -54,10 +55,10 @@ class PluginLogFilter(logging.Filter):
def filter(self, record):
if hasattr(record.msg, 'msg') and isinstance(record.msg.msg,
basestring):
six.string_types):
# A _LogMessage from our hacked-up Logging replacement.
record.msg.msg = self.prefix + record.msg.msg
elif isinstance(record.msg, basestring):
elif isinstance(record.msg, six.string_types):
record.msg = self.prefix + record.msg
return True
@ -80,6 +81,7 @@ class BeetsPlugin(object):
self.template_fields = {}
if not self.album_template_fields:
self.album_template_fields = {}
self.early_import_stages = []
self.import_stages = []
self._log = log.getChild(self.name)
@ -93,6 +95,22 @@ class BeetsPlugin(object):
"""
return ()
def _set_stage_log_level(self, stages):
    """Adjust all the stages in `stages` to WARNING logging level.

    :param stages: iterable of import-stage callables belonging to
        this plugin.
    :return: a list of wrapped callables; each wrapper temporarily
        raises the plugin's log level to WARNING while the stage runs
        (see `_set_log_level_and_params`).
    """
    return [self._set_log_level_and_params(logging.WARNING, stage)
            for stage in stages]
def get_early_import_stages(self):
    """Return a list of functions that should be called as importer
    pipeline stages early in the pipeline.

    The callables are wrapped versions of the functions in
    `self.early_import_stages`. Wrapping provides some bookkeeping for
    the plugin: specifically, the logging level is adjusted to WARNING.
    """
    return self._set_stage_log_level(self.early_import_stages)
def get_import_stages(self):
"""Return a list of functions that should be called as importer
pipelines stages.
@ -101,8 +119,7 @@ class BeetsPlugin(object):
`self.import_stages`. Wrapping provides some bookkeeping for the
plugin: specifically, the logging level is adjusted to WARNING.
"""
return [self._set_log_level_and_params(logging.WARNING, import_stage)
for import_stage in self.import_stages]
return self._set_stage_log_level(self.import_stages)
def _set_log_level_and_params(self, base_log_level, func):
"""Wrap `func` to temporarily set this plugin's logger level to
@ -254,7 +271,7 @@ def load_plugins(names=()):
except ImportError as exc:
# Again, this is hacky:
if exc.args[0].endswith(' ' + name):
log.warn(u'** plugin {0} not found', name)
log.warning(u'** plugin {0} not found', name)
else:
raise
else:
@ -263,8 +280,8 @@ def load_plugins(names=()):
and obj != BeetsPlugin and obj not in _classes:
_classes.add(obj)
except:
log.warn(
except Exception:
log.warning(
u'** error loading plugin {}:\n{}',
name,
traceback.format_exc(),
@ -350,41 +367,35 @@ def album_distance(items, album_info, mapping):
def candidates(items, artist, album, va_likely):
"""Gets MusicBrainz candidates for an album from each plugin.
"""
out = []
for plugin in find_plugins():
out.extend(plugin.candidates(items, artist, album, va_likely))
return out
for candidate in plugin.candidates(items, artist, album, va_likely):
yield candidate
def item_candidates(item, artist, title):
"""Gets MusicBrainz candidates for an item from the plugins.
"""
out = []
for plugin in find_plugins():
out.extend(plugin.item_candidates(item, artist, title))
return out
for item_candidate in plugin.item_candidates(item, artist, title):
yield item_candidate
def album_for_id(album_id):
"""Get AlbumInfo objects for a given ID string.
"""
out = []
for plugin in find_plugins():
res = plugin.album_for_id(album_id)
if res:
out.append(res)
return out
album = plugin.album_for_id(album_id)
if album:
yield album
def track_for_id(track_id):
"""Get TrackInfo objects for a given ID string.
"""
out = []
for plugin in find_plugins():
res = plugin.track_for_id(track_id)
if res:
out.append(res)
return out
track = plugin.track_for_id(track_id)
if track:
yield track
def template_funcs():
@ -398,6 +409,14 @@ def template_funcs():
return funcs
def early_import_stages():
    """Collect the early import stage functions contributed by all
    loaded plugins, in plugin order.
    """
    return [stage
            for plugin in find_plugins()
            for stage in plugin.get_early_import_stages()]
def import_stages():
"""Get a list of import stage functions defined by plugins."""
stages = []
@ -483,7 +502,64 @@ def sanitize_choices(choices, choices_all):
others = [x for x in choices_all if x not in choices]
res = []
for s in choices:
if s in list(choices_all) + ['*']:
if not (s in seen or seen.add(s)):
res.extend(list(others) if s == '*' else [s])
if s not in seen:
if s in list(choices_all):
res.append(s)
elif s == '*':
res.extend(others)
seen.add(s)
return res
def sanitize_pairs(pairs, pairs_all):
    """Clean up a single-element mapping configuration attribute as returned
    by `confit`'s `Pairs` template: keep only two-element tuples present in
    pairs_all, remove duplicate elements, expand ('str', '*') and ('*', '*')
    wildcards while keeping the original order. Note that ('*', '*') and
    ('*', 'whatever') have the same effect.

    For example,

    >>> sanitize_pairs(
    ...     [('foo', 'baz bar'), ('key', '*'), ('*', '*')],
    ...     [('foo', 'bar'), ('foo', 'baz'), ('foo', 'foobar'),
    ...      ('key', 'value')]
    ...     )
    [('foo', 'baz'), ('foo', 'bar'), ('key', 'value'), ('foo', 'foobar')]
    """
    valid_pairs = list(pairs_all)
    # Candidates for wildcard expansion: everything not literally listed
    # in the raw `pairs` input.
    wildcard_pool = [p for p in valid_pairs if p not in pairs]
    emitted = set()
    result = []

    def _emit(candidates):
        # Append each candidate exactly once, preserving order.
        for candidate in candidates:
            if candidate not in emitted:
                emitted.add(candidate)
                result.append(candidate)

    for key, value_string in pairs:
        for value in value_string.split():
            pair = (key, value)
            if pair in valid_pairs:
                _emit([pair])
            elif key == '*':
                # ('*', anything): expand to every remaining valid pair.
                _emit(wildcard_pool)
            elif value == '*':
                # (key, '*'): expand to the remaining pairs for this key.
                _emit([p for p in wildcard_pool if p[0] == key])
    return result
def notify_info_yielded(event):
    """Decorator factory: fire the plugin event `event` for every value
    the wrapped generator yields.

    Intended to decorate a generator, but any function returning an
    iterable works. Each yielded value is forwarded to plugins as the
    'info' keyword argument of `send`.
    """
    def decorator(func):
        def wrapper(*args, **kwargs):
            for item in func(*args, **kwargs):
                send(event, info=item)
                yield item
        return wrapper
    return decorator

View file

@ -20,7 +20,6 @@ CLI commands are implemented in the ui.commands module.
from __future__ import division, absolute_import, print_function
import locale
import optparse
import textwrap
import sys
@ -31,6 +30,7 @@ import re
import struct
import traceback
import os.path
from six.moves import input
from beets import logging
from beets import library
@ -38,9 +38,11 @@ from beets import plugins
from beets import util
from beets.util.functemplate import Template
from beets import config
from beets.util import confit
from beets.util import confit, as_string
from beets.autotag import mb
from beets.dbcore import query as db_query
from beets.dbcore import db
import six
# On Windows platforms, use colorama to support "ANSI" terminal colors.
if sys.platform == 'win32':
@ -73,51 +75,47 @@ class UserError(Exception):
# Encoding utilities.
def _in_encoding(default=u'utf-8'):
def _in_encoding():
"""Get the encoding to use for *inputting* strings from the console.
:param default: the fallback sys.stdin encoding
"""
return config['terminal_encoding'].get() or getattr(sys.stdin, 'encoding',
default)
return _stream_encoding(sys.stdin)
def _out_encoding():
    """Get the encoding to use for *outputting* strings to the console.

    Delegates to `_stream_encoding`, so a configured
    ``terminal_encoding`` override takes precedence over the stream's
    own reported encoding.
    """
    return _stream_encoding(sys.stdout)
def _stream_encoding(stream, default='utf-8'):
"""A helper for `_in_encoding` and `_out_encoding`: get the stream's
preferred encoding, using a configured override or a default
fallback if neither is specified.
"""
# Configured override?
encoding = config['terminal_encoding'].get()
if encoding:
return encoding
# For testing: When sys.stdout is a StringIO under the test harness,
# it doesn't have an `encoding` attribute. Just use UTF-8.
if not hasattr(sys.stdout, 'encoding'):
return 'utf8'
# For testing: When sys.stdout or sys.stdin is a StringIO under the
# test harness, it doesn't have an `encoding` attribute. Just use
# UTF-8.
if not hasattr(stream, 'encoding'):
return default
# Python's guessed output stream encoding, or UTF-8 as a fallback
# (e.g., when piped to a file).
return sys.stdout.encoding or 'utf8'
def _arg_encoding():
"""Get the encoding for command-line arguments (and other OS
locale-sensitive strings).
"""
try:
return locale.getdefaultlocale()[1] or 'utf8'
except ValueError:
# Invalid locale environment variable setting. To avoid
# failing entirely for no good reason, assume UTF-8.
return 'utf8'
return stream.encoding or default
def decargs(arglist):
"""Given a list of command-line argument bytestrings, attempts to
decode them to Unicode strings.
decode them to Unicode strings when running under Python 2.
"""
return [s.decode(_arg_encoding()) for s in arglist]
if six.PY2:
return [s.decode(util.arg_encoding()) for s in arglist]
else:
return arglist
def print_(*strings, **kwargs):
@ -125,26 +123,36 @@ def print_(*strings, **kwargs):
is not in the terminal's encoding's character set, just silently
replaces it.
If the arguments are strings then they're expected to share the same
type: either bytes or unicode.
The arguments must be Unicode strings: `unicode` on Python 2; `str` on
Python 3.
The `end` keyword argument behaves similarly to the built-in `print`
(it defaults to a newline). The value should have the same string
type as the arguments.
(it defaults to a newline).
"""
end = kwargs.get('end')
if not strings:
strings = [u'']
assert isinstance(strings[0], six.text_type)
if not strings or isinstance(strings[0], unicode):
txt = u' '.join(strings)
txt += u'\n' if end is None else end
txt += kwargs.get('end', u'\n')
# Encode the string and write it to stdout.
if six.PY2:
# On Python 2, sys.stdout expects bytes.
out = txt.encode(_out_encoding(), 'replace')
sys.stdout.write(out)
else:
txt = b' '.join(strings)
txt += b'\n' if end is None else end
# Always send bytes to the stdout stream.
if isinstance(txt, unicode):
txt = txt.encode(_out_encoding(), 'replace')
# On Python 3, sys.stdout expects text strings and uses the
# exception-throwing encoding error policy. To avoid throwing
# errors and use our configurable encoding override, we use the
# underlying bytes buffer instead.
if hasattr(sys.stdout, 'buffer'):
out = txt.encode(_out_encoding(), 'replace')
sys.stdout.buffer.write(out)
sys.stdout.buffer.flush()
else:
# In our test harnesses (e.g., DummyOut), sys.stdout.buffer
# does not exist. We instead just record the text string.
sys.stdout.write(txt)
@ -188,23 +196,26 @@ def should_move(move_opt=None):
# Input prompts.
def input_(prompt=None):
"""Like `raw_input`, but decodes the result to a Unicode string.
"""Like `input`, but decodes the result to a Unicode string.
Raises a UserError if stdin is not available. The prompt is sent to
stdout rather than stderr. A space is printed between the prompt and the
input cursor.
"""
# raw_input incorrectly sends prompts to stderr, not stdout, so we
# use print() explicitly to display prompts.
# use print_() explicitly to display prompts.
# http://bugs.python.org/issue1927
if prompt:
print_(prompt, end=' ')
print_(prompt, end=u' ')
try:
resp = raw_input()
resp = input()
except EOFError:
raise UserError(u'stdin stream ended while input required')
if six.PY2:
return resp.decode(_in_encoding(), 'ignore')
else:
return resp
def input_options(options, require=False, prompt=None, fallback_prompt=None,
@ -256,7 +267,7 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None,
# Mark the option's shortcut letter for display.
if not require and (
(default is None and not numrange and first) or
(isinstance(default, basestring) and
(isinstance(default, six.string_types) and
found_letter.lower() == default.lower())):
# The first option is the default; mark it.
show_letter = '[%s]' % found_letter.upper()
@ -292,11 +303,11 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None,
prompt_part_lengths = []
if numrange:
if isinstance(default, int):
default_name = unicode(default)
default_name = six.text_type(default)
default_name = colorize('action_default', default_name)
tmpl = '# selection (default %s)'
prompt_parts.append(tmpl % default_name)
prompt_part_lengths.append(len(tmpl % unicode(default)))
prompt_part_lengths.append(len(tmpl % six.text_type(default)))
else:
prompt_parts.append('# selection')
prompt_part_lengths.append(len(prompt_parts[-1]))
@ -516,7 +527,8 @@ def colorize(color_name, text):
if config['ui']['color']:
global COLORS
if not COLORS:
COLORS = dict((name, config['ui']['colors'][name].get(unicode))
COLORS = dict((name,
config['ui']['colors'][name].as_str())
for name in COLOR_NAMES)
# In case a 3rd party plugin is still passing the actual color ('red')
# instead of the abstract color name ('text_error')
@ -536,10 +548,11 @@ def _colordiff(a, b, highlight='text_highlight',
highlighted intelligently to show differences; other values are
stringified and highlighted in their entirety.
"""
if not isinstance(a, basestring) or not isinstance(b, basestring):
if not isinstance(a, six.string_types) \
or not isinstance(b, six.string_types):
# Non-strings: use ordinary equality.
a = unicode(a)
b = unicode(b)
a = six.text_type(a)
b = six.text_type(b)
if a == b:
return a, b
else:
@ -587,7 +600,7 @@ def colordiff(a, b, highlight='text_highlight'):
if config['ui']['color']:
return _colordiff(a, b, highlight)
else:
return unicode(a), unicode(b)
return six.text_type(a), six.text_type(b)
def get_path_formats(subview=None):
@ -598,7 +611,7 @@ def get_path_formats(subview=None):
subview = subview or config['paths']
for query, view in subview.items():
query = PF_KEY_QUERIES.get(query, query) # Expand common queries.
path_formats.append((query, Template(view.get(unicode))))
path_formats.append((query, Template(view.as_str())))
return path_formats
@ -666,7 +679,7 @@ def _field_diff(field, old, new):
# For strings, highlight changes. For others, colorize the whole
# thing.
if isinstance(oldval, basestring):
if isinstance(oldval, six.string_types):
oldstr, newstr = colordiff(oldval, newstr)
else:
oldstr = colorize('text_error', oldstr)
@ -757,6 +770,34 @@ def show_path_changes(path_changes):
log.info(u'{0} {1} -> {2}', source, ' ' * pad, dest)
# Helper functions for option parsing.
def _store_dict(option, opt_str, value, parser):
    """Custom optparse callback that aggregates repeated ``key=value``
    option arguments into a single dictionary stored on the parser's
    values under the option's `dest`.
    """
    dest = option.dest
    mapping = getattr(parser.values, dest, None)
    if mapping is None:
        # First ``key=value`` pair seen for this option: create the dict.
        mapping = dict()
        setattr(parser.values, dest, mapping)

    try:
        key, value = (util.text_string(part) for part in value.split('='))
        if not (key and value):
            raise ValueError
    except ValueError:
        raise UserError(
            "supplied argument `{0}' is not of the form `key=value'"
            .format(value))

    mapping[key] = value
class CommonOptionsParser(optparse.OptionParser, object):
"""Offers a simple way to add common formatting options.
@ -799,7 +840,14 @@ class CommonOptionsParser(optparse.OptionParser, object):
if store_true:
setattr(parser.values, option.dest, True)
value = fmt or value and unicode(value) or ''
# Use the explicitly specified format, or the string from the option.
if fmt:
value = fmt
elif value:
value, = decargs([value])
else:
value = u''
parser.values.format = value
if target:
config[target._format_config_key].set(value)
@ -830,7 +878,7 @@ class CommonOptionsParser(optparse.OptionParser, object):
"""
path = optparse.Option(*flags, nargs=0, action='callback',
callback=self._set_format,
callback_kwargs={'fmt': '$path',
callback_kwargs={'fmt': u'$path',
'store_true': True},
help=u'print paths for matched items or albums')
self.add_option(path)
@ -852,7 +900,7 @@ class CommonOptionsParser(optparse.OptionParser, object):
"""
kwargs = {}
if target:
if isinstance(target, basestring):
if isinstance(target, six.string_types):
target = {'item': library.Item,
'album': library.Album}[target]
kwargs['target'] = target
@ -911,7 +959,7 @@ class Subcommand(object):
def root_parser(self, root_parser):
self._root_parser = root_parser
self.parser.prog = '{0} {1}'.format(
root_parser.get_prog_name().decode('utf8'), self.name)
as_string(root_parser.get_prog_name()), self.name)
class SubcommandsOptionParser(CommonOptionsParser):
@ -1044,54 +1092,24 @@ class SubcommandsOptionParser(CommonOptionsParser):
optparse.Option.ALWAYS_TYPED_ACTIONS += ('callback',)
def vararg_callback(option, opt_str, value, parser):
    """Callback for an option with variable arguments.

    Consumes the arguments to the right of a callback-action option
    (``action="callback"``) until the next option-like token, and stores
    the collected list on the parser values under the option's `dest`.
    Negative numbers such as ``-3`` or ``-3.0`` are treated as arguments,
    not options.

    Usage:
        parser.add_option("-c", "--callback", dest="vararg_attr",
                          action="callback", callback=vararg_callback)
    """
    collected = [value]

    def _looks_numeric(token):
        # True when `token` parses as a float (so ``-3.0`` is an argument).
        try:
            float(token)
        except ValueError:
            return False
        return True

    for arg in parser.rargs:
        # Stop at long options such as ``--foo``.
        if arg.startswith('--') and len(arg) > 2:
            break
        # Stop at short options such as ``-a``, but not at negative numbers.
        if arg.startswith('-') and len(arg) > 1 and not _looks_numeric(arg):
            break
        collected.append(arg)

    # Remove the consumed tokens from the remaining argument list.
    del parser.rargs[:len(collected) - 1]
    setattr(parser.values, option.dest, collected)
# The main entry point and bootstrapping.
def _load_plugins(config):
"""Load the plugins specified in the configuration.
"""
paths = config['pluginpath'].get(confit.StrSeq(split=False))
paths = map(util.normpath, paths)
paths = config['pluginpath'].as_str_seq(split=False)
paths = [util.normpath(p) for p in paths]
log.debug(u'plugin paths: {0}', util.displayable_path(paths))
# On Python 3, the search paths need to be unicode.
paths = [util.py3_path(p) for p in paths]
# Extend the `beetsplug` package to include the plugin paths.
import beetsplug
beetsplug.__path__ = paths + beetsplug.__path__
# For backwards compatibility.
# For backwards compatibility, also support plugin paths that
# *contain* a `beetsplug` package.
sys.path += paths
plugins.load_plugins(config['plugins'].as_str_seq())
@ -1133,9 +1151,11 @@ def _configure(options):
# special handling lets specified plugins get loaded before we
# finish parsing the command line.
if getattr(options, 'config', None) is not None:
config_path = options.config
overlay_path = options.config
del options.config
config.set_file(config_path)
config.set_file(overlay_path)
else:
overlay_path = None
config.set_args(options)
# Configure the logger.
@ -1144,27 +1164,9 @@ def _configure(options):
else:
log.set_global_level(logging.INFO)
# Ensure compatibility with old (top-level) color configuration.
# Deprecation msg to motivate user to switch to config['ui']['color].
if config['color'].exists():
log.warning(u'Warning: top-level configuration of `color` '
u'is deprecated. Configure color use under `ui`. '
u'See documentation for more info.')
config['ui']['color'].set(config['color'].get(bool))
# Compatibility from list_format_{item,album} to format_{item,album}
for elem in ('item', 'album'):
old_key = 'list_format_{0}'.format(elem)
if config[old_key].exists():
new_key = 'format_{0}'.format(elem)
log.warning(
u'Warning: configuration uses "{0}" which is deprecated'
u' in favor of "{1}" now that it affects all commands. '
u'See changelog & documentation.',
old_key,
new_key,
)
config[new_key].set(config[old_key])
if overlay_path:
log.debug(u'overlaying configuration: {0}',
util.displayable_path(overlay_path))
config_path = config.user_config_path()
if os.path.isfile(config_path):
@ -1182,7 +1184,7 @@ def _configure(options):
def _open_library(config):
"""Create a new library instance from the configuration.
"""
dbpath = config['library'].as_filename()
dbpath = util.bytestring_path(config['library'].as_filename())
try:
lib = library.Library(
dbpath,
@ -1233,6 +1235,7 @@ def _raw_main(args, lib=None):
from beets.ui.commands import config_edit
return config_edit()
test_lib = bool(lib)
subcommands, plugins, lib = _setup(options, lib)
parser.add_subcommand(*subcommands)
@ -1240,6 +1243,9 @@ def _raw_main(args, lib=None):
subcommand.func(lib, suboptions, subargs)
plugins.send('cli_exit', lib=lib)
if not test_lib:
# Clean up the library unless it came from the test harness.
lib._close()
def main(args=None):
@ -1270,9 +1276,16 @@ def main(args=None):
except IOError as exc:
if exc.errno == errno.EPIPE:
# "Broken pipe". End silently.
pass
sys.stderr.close()
else:
raise
except KeyboardInterrupt:
# Silently ignore ^C except in verbose mode.
log.debug(u'{}', traceback.format_exc())
except db.DBAccessError as exc:
log.error(
u'database access error: {0}\n'
u'the library file might have a permissions problem',
exc
)
sys.exit(1)

View file

@ -21,6 +21,7 @@ from __future__ import division, absolute_import, print_function
import os
import re
from platform import python_version
from collections import namedtuple, Counter
from itertools import chain
@ -33,14 +34,17 @@ from beets.autotag import hooks
from beets import plugins
from beets import importer
from beets import util
from beets.util import syspath, normpath, ancestry, displayable_path
from beets.util import syspath, normpath, ancestry, displayable_path, \
MoveOperation
from beets import library
from beets import config
from beets import logging
from beets.util.confit import _package_path
import six
from . import _store_dict
VARIOUS_ARTISTS = u'Various Artists'
PromptChoice = namedtuple('ExtraChoice', ['short', 'long', 'callback'])
PromptChoice = namedtuple('PromptChoice', ['short', 'long', 'callback'])
# Global logger.
log = logging.getLogger('beets')
@ -82,16 +86,16 @@ def _do_query(lib, query, album, also_items=True):
def _print_keys(query):
"""Given a SQLite query result, print the `key` field of each
returned row, with identation of 2 spaces.
returned row, with indentation of 2 spaces.
"""
for row in query:
print_(' ' * 2 + row['key'])
print_(u' ' * 2 + row['key'])
def fields_func(lib, opts, args):
def _print_rows(names):
names.sort()
print_(" " + "\n ".join(names))
print_(u' ' + u'\n '.join(names))
print_(u"Item fields:")
_print_rows(library.Item.all_keys())
@ -156,14 +160,14 @@ def disambig_string(info):
if isinstance(info, hooks.AlbumInfo):
if info.media:
if info.mediums > 1:
if info.mediums and info.mediums > 1:
disambig.append(u'{0}x{1}'.format(
info.mediums, info.media
))
else:
disambig.append(info.media)
if info.year:
disambig.append(unicode(info.year))
disambig.append(six.text_type(info.year))
if info.country:
disambig.append(info.country)
if info.label:
@ -233,12 +237,12 @@ def show_change(cur_artist, cur_album, match):
medium = track_info.disc
mediums = track_info.disctotal
if config['per_disc_numbering']:
if mediums > 1:
if mediums and mediums > 1:
return u'{0}-{1}'.format(medium, medium_index)
else:
return unicode(medium_index)
return six.text_type(medium_index or index)
else:
return unicode(index)
return six.text_type(index)
# Identify the album in question.
if cur_artist != match.info.artist or \
@ -279,7 +283,7 @@ def show_change(cur_artist, cur_album, match):
print_(' '.join(info))
# Tracks.
pairs = match.mapping.items()
pairs = list(match.mapping.items())
pairs.sort(key=lambda item_and_track_info: item_and_track_info[1].index)
# Build up LHS and RHS for track difference display. The `lines` list
@ -493,7 +497,7 @@ def _summary_judgment(rec):
def choose_candidate(candidates, singleton, rec, cur_artist=None,
cur_album=None, item=None, itemcount=None,
extra_choices=[]):
choices=[]):
"""Given a sorted list of candidates, ask the user for a selection
of which candidate to use. Applies to both full albums and
singletons (tracks). Candidates are either AlbumMatch or TrackMatch
@ -501,16 +505,12 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None,
`cur_album`, and `itemcount` must be provided. For singletons,
`item` must be provided.
`extra_choices` is a list of `PromptChoice`s, containg the choices
appended by the plugins after receiving the `before_choose_candidate`
event. If not empty, the choices are appended to the prompt presented
to the user.
`choices` is a list of `PromptChoice`s to be used in each prompt.
Returns one of the following:
* the result of the choice, which may be SKIP, ASIS, TRACKS, or MANUAL
* the result of the choice, which may be SKIP or ASIS
* a candidate (an AlbumMatch/TrackMatch object)
* the short letter of a `PromptChoice` (if the user selected one of
the `extra_choices`).
* a chosen `PromptChoice` from `choices`
"""
# Sanity check.
if singleton:
@ -519,41 +519,22 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None,
assert cur_artist is not None
assert cur_album is not None
# Build helper variables for extra choices.
extra_opts = tuple(c.long for c in extra_choices)
extra_actions = tuple(c.short for c in extra_choices)
# Build helper variables for the prompt choices.
choice_opts = tuple(c.long for c in choices)
choice_actions = {c.short: c for c in choices}
# Zero candidates.
if not candidates:
if singleton:
print_(u"No matching recordings found.")
opts = (u'Use as-is', u'Skip', u'Enter search', u'enter Id',
u'aBort')
else:
print_(u"No matching release found for {0} tracks."
.format(itemcount))
print_(u'For help, see: '
u'http://beets.readthedocs.org/en/latest/faq.html#nomatch')
opts = (u'Use as-is', u'as Tracks', u'Group albums', u'Skip',
u'Enter search', u'enter Id', u'aBort')
sel = ui.input_options(opts + extra_opts)
if sel == u'u':
return importer.action.ASIS
elif sel == u't':
assert not singleton
return importer.action.TRACKS
elif sel == u'e':
return importer.action.MANUAL
elif sel == u's':
return importer.action.SKIP
elif sel == u'b':
raise importer.ImportAbort()
elif sel == u'i':
return importer.action.MANUAL_ID
elif sel == u'g':
return importer.action.ALBUMS
elif sel in extra_actions:
return sel
sel = ui.input_options(choice_opts)
if sel in choice_actions:
return choice_actions[sel]
else:
assert False
@ -601,33 +582,12 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None,
print_(u' '.join(line))
# Ask the user for a choice.
if singleton:
opts = (u'Skip', u'Use as-is', u'Enter search', u'enter Id',
u'aBort')
else:
opts = (u'Skip', u'Use as-is', u'as Tracks', u'Group albums',
u'Enter search', u'enter Id', u'aBort')
sel = ui.input_options(opts + extra_opts,
sel = ui.input_options(choice_opts,
numrange=(1, len(candidates)))
if sel == u's':
return importer.action.SKIP
elif sel == u'u':
return importer.action.ASIS
elif sel == u'm':
if sel == u'm':
pass
elif sel == u'e':
return importer.action.MANUAL
elif sel == u't':
assert not singleton
return importer.action.TRACKS
elif sel == u'b':
raise importer.ImportAbort()
elif sel == u'i':
return importer.action.MANUAL_ID
elif sel == u'g':
return importer.action.ALBUMS
elif sel in extra_actions:
return sel
elif sel in choice_actions:
return choice_actions[sel]
else: # Numerical selection.
match = candidates[sel - 1]
if sel != 1:
@ -647,13 +607,6 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None,
return match
# Ask for confirmation.
if singleton:
opts = (u'Apply', u'More candidates', u'Skip', u'Use as-is',
u'Enter search', u'enter Id', u'aBort')
else:
opts = (u'Apply', u'More candidates', u'Skip', u'Use as-is',
u'as Tracks', u'Group albums', u'Enter search',
u'enter Id', u'aBort')
default = config['import']['default_action'].as_choice({
u'apply': u'a',
u'skip': u's',
@ -662,43 +615,57 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None,
})
if default is None:
require = True
sel = ui.input_options(opts + extra_opts, require=require,
default=default)
# Bell ring when user interaction is needed.
if config['import']['bell']:
ui.print_(u'\a', end=u'')
sel = ui.input_options((u'Apply', u'More candidates') + choice_opts,
require=require, default=default)
if sel == u'a':
return match
elif sel == u'g':
return importer.action.ALBUMS
elif sel == u's':
return importer.action.SKIP
elif sel == u'u':
return importer.action.ASIS
elif sel == u't':
assert not singleton
return importer.action.TRACKS
elif sel == u'e':
return importer.action.MANUAL
elif sel == u'b':
raise importer.ImportAbort()
elif sel == u'i':
return importer.action.MANUAL_ID
elif sel in extra_actions:
return sel
elif sel in choice_actions:
return choice_actions[sel]
def manual_search(singleton):
"""Input either an artist and album (for full albums) or artist and
def manual_search(session, task):
"""Get a new `Proposal` using manual search criteria.
Input either an artist and album (for full albums) or artist and
track name (for singletons) for manual search.
"""
artist = input_(u'Artist:')
name = input_(u'Track:' if singleton else u'Album:')
return artist.strip(), name.strip()
artist = input_(u'Artist:').strip()
name = input_(u'Album:' if task.is_album else u'Track:').strip()
if task.is_album:
_, _, prop = autotag.tag_album(
task.items, artist, name
)
return prop
else:
return autotag.tag_item(task.item, artist, name)
def manual_id(singleton):
"""Input an ID, either for an album ("release") or a track ("recording").
def manual_id(session, task):
"""Get a new `Proposal` using a manually-entered ID.
Input an ID, either for an album ("release") or a track ("recording").
"""
prompt = u'Enter {0} ID:'.format(u'recording' if singleton else u'release')
return input_(prompt).strip()
prompt = u'Enter {0} ID:'.format(u'release' if task.is_album
else u'recording')
search_id = input_(prompt).strip()
if task.is_album:
_, _, prop = autotag.tag_album(
task.items, search_ids=search_id.split()
)
return prop
else:
return autotag.tag_item(task.item, search_ids=search_id.split())
def abort_action(session, task):
    """A prompt choice callback that aborts the importer.

    `session` and `task` are accepted only to satisfy the
    `PromptChoice` callback signature; neither is used.
    """
    raise importer.ImportAbort()
class TerminalImportSession(importer.ImportSession):
@ -724,42 +691,34 @@ class TerminalImportSession(importer.ImportSession):
return action
# Loop until we have a choice.
candidates, rec = task.candidates, task.rec
while True:
# Gather extra choices from plugins.
extra_choices = self._get_plugin_choices(task)
extra_ops = {c.short: c.callback for c in extra_choices}
# Ask for a choice from the user.
# Ask for a choice from the user. The result of
# `choose_candidate` may be an `importer.action`, an
# `AlbumMatch` object for a specific selection, or a
# `PromptChoice`.
choices = self._get_choices(task)
choice = choose_candidate(
candidates, False, rec, task.cur_artist, task.cur_album,
itemcount=len(task.items), extra_choices=extra_choices
task.candidates, False, task.rec, task.cur_artist,
task.cur_album, itemcount=len(task.items), choices=choices
)
# Choose which tags to use.
if choice in (importer.action.SKIP, importer.action.ASIS,
importer.action.TRACKS, importer.action.ALBUMS):
# Basic choices that require no more action here.
if choice in (importer.action.SKIP, importer.action.ASIS):
# Pass selection to main control flow.
return choice
elif choice is importer.action.MANUAL:
# Try again with manual search terms.
search_artist, search_album = manual_search(False)
_, _, candidates, rec = autotag.tag_album(
task.items, search_artist, search_album
)
elif choice is importer.action.MANUAL_ID:
# Try a manually-entered ID.
search_id = manual_id(False)
if search_id:
_, _, candidates, rec = autotag.tag_album(
task.items, search_ids=search_id.split()
)
elif choice in extra_ops.keys():
# Allow extra ops to automatically set the post-choice.
post_choice = extra_ops[choice](self, task)
# Plugin-provided choices. We invoke the associated callback
# function.
elif choice in choices:
post_choice = choice.callback(self, task)
if isinstance(post_choice, importer.action):
# MANUAL and MANUAL_ID have no effect, even if returned.
return post_choice
elif isinstance(post_choice, autotag.Proposal):
# Use the new candidates and continue around the loop.
task.candidates = post_choice.candidates
task.rec = post_choice.recommendation
# Otherwise, we have a specific match selection.
else:
# We have a candidate! Finish tagging. Here, choice is an
# AlbumMatch object.
@ -771,7 +730,7 @@ class TerminalImportSession(importer.ImportSession):
either an action constant or a TrackMatch object.
"""
print_()
print_(task.item.path)
print_(displayable_path(task.item.path))
candidates, rec = task.candidates, task.rec
# Take immediate action if appropriate.
@ -784,34 +743,22 @@ class TerminalImportSession(importer.ImportSession):
return action
while True:
extra_choices = self._get_plugin_choices(task)
extra_ops = {c.short: c.callback for c in extra_choices}
# Ask for a choice.
choices = self._get_choices(task)
choice = choose_candidate(candidates, True, rec, item=task.item,
extra_choices=extra_choices)
choices=choices)
if choice in (importer.action.SKIP, importer.action.ASIS):
return choice
elif choice == importer.action.TRACKS:
assert False # TRACKS is only legal for albums.
elif choice == importer.action.MANUAL:
# Continue in the loop with a new set of candidates.
search_artist, search_title = manual_search(True)
candidates, rec = autotag.tag_item(task.item, search_artist,
search_title)
elif choice == importer.action.MANUAL_ID:
# Ask for a track ID.
search_id = manual_id(True)
if search_id:
candidates, rec = autotag.tag_item(
task.item, search_ids=search_id.split())
elif choice in extra_ops.keys():
# Allow extra ops to automatically set the post-choice.
post_choice = extra_ops[choice](self, task)
elif choice in choices:
post_choice = choice.callback(self, task)
if isinstance(post_choice, importer.action):
# MANUAL and MANUAL_ID have no effect, even if returned.
return post_choice
elif isinstance(post_choice, autotag.Proposal):
candidates = post_choice.candidates
rec = post_choice.recommendation
else:
# Chose a candidate.
assert isinstance(choice, autotag.TrackMatch)
@ -821,7 +768,7 @@ class TerminalImportSession(importer.ImportSession):
"""Decide what to do when a new album or item seems similar to one
that's already in the library.
"""
log.warn(u"This {0} is already in the library!",
log.warning(u"This {0} is already in the library!",
(u"album" if task.is_album else u"item"))
if config['import']['quiet']:
@ -843,7 +790,7 @@ class TerminalImportSession(importer.ImportSession):
))
sel = ui.input_options(
(u'Skip new', u'Keep both', u'Remove old')
(u'Skip new', u'Keep both', u'Remove old', u'Merge all')
)
if sel == u's':
@ -855,6 +802,8 @@ class TerminalImportSession(importer.ImportSession):
elif sel == u'r':
# Remove old.
task.should_remove_duplicates = True
elif sel == u'm':
task.should_merge_duplicates = True
else:
assert False
@ -863,8 +812,10 @@ class TerminalImportSession(importer.ImportSession):
u"was interrupted. Resume (Y/n)?"
.format(displayable_path(path)))
def _get_plugin_choices(self, task):
"""Get the extra choices appended to the plugins to the ui prompt.
def _get_choices(self, task):
"""Get the list of prompt choices that should be presented to the
user. This consists of both built-in choices and ones provided by
plugins.
The `before_choose_candidate` event is sent to the plugins, with
session and task as its parameters. Plugins are responsible for
@ -877,20 +828,37 @@ class TerminalImportSession(importer.ImportSession):
Returns a list of `PromptChoice`s.
"""
# Standard, built-in choices.
choices = [
PromptChoice(u's', u'Skip',
lambda s, t: importer.action.SKIP),
PromptChoice(u'u', u'Use as-is',
lambda s, t: importer.action.ASIS)
]
if task.is_album:
choices += [
PromptChoice(u't', u'as Tracks',
lambda s, t: importer.action.TRACKS),
PromptChoice(u'g', u'Group albums',
lambda s, t: importer.action.ALBUMS),
]
choices += [
PromptChoice(u'e', u'Enter search', manual_search),
PromptChoice(u'i', u'enter Id', manual_id),
PromptChoice(u'b', u'aBort', abort_action),
]
# Send the before_choose_candidate event and flatten list.
extra_choices = list(chain(*plugins.send('before_choose_candidate',
session=self, task=task)))
# Take into account default options, for duplicate checking.
all_choices = [PromptChoice(u'a', u'Apply', None),
PromptChoice(u's', u'Skip', None),
PromptChoice(u'u', u'Use as-is', None),
PromptChoice(u't', u'as Tracks', None),
PromptChoice(u'g', u'Group albums', None),
PromptChoice(u'e', u'Enter search', None),
PromptChoice(u'i', u'enter Id', None),
PromptChoice(u'b', u'aBort', None)] +\
extra_choices
# Add a "dummy" choice for the other baked-in option, for
# duplicate checking.
all_choices = [
PromptChoice(u'a', u'Apply', None),
] + choices + extra_choices
# Check for conflicts.
short_letters = [c.short for c in all_choices]
if len(short_letters) != len(set(short_letters)):
# Duplicate short letter has been found.
@ -900,11 +868,12 @@ class TerminalImportSession(importer.ImportSession):
# Keep the first of the choices, removing the rest.
dup_choices = [c for c in all_choices if c.short == short]
for c in dup_choices[1:]:
log.warn(u"Prompt choice '{0}' removed due to conflict "
log.warning(u"Prompt choice '{0}' removed due to conflict "
u"with '{1}' (short letter: '{2}')",
c.long, dup_choices[0].long, c.short)
extra_choices.remove(c)
return extra_choices
return choices + extra_choices
# The import command.
@ -964,6 +933,13 @@ def import_func(lib, opts, args):
if not paths:
raise ui.UserError(u'no path specified')
# On Python 2, we get filenames as raw bytes, which is what we
# need. On Python 3, we need to undo the "helpful" conversion to
# Unicode strings to get the real bytestring filename.
if not six.PY2:
paths = [p.encode(util.arg_encoding(), 'surrogateescape')
for p in paths]
import_files(lib, paths, query)
@ -978,6 +954,10 @@ import_cmd.parser.add_option(
u'-C', u'--nocopy', action='store_false', dest='copy',
help=u"don't copy tracks (opposite of -c)"
)
import_cmd.parser.add_option(
u'-m', u'--move', action='store_true', dest='move',
help=u"move tracks into the library (overrides -c)"
)
import_cmd.parser.add_option(
u'-w', u'--write', action='store_true', default=None,
help=u"write new metadata to files' tags (default)"
@ -1030,6 +1010,10 @@ import_cmd.parser.add_option(
u'-I', u'--noincremental', dest='incremental', action='store_false',
help=u'do not skip already-imported directories'
)
import_cmd.parser.add_option(
u'--from-scratch', dest='from_scratch', action='store_true',
help=u'erase existing metadata before applying new metadata'
)
import_cmd.parser.add_option(
u'--flat', dest='flat', action='store_true',
help=u'import an entire tree as a single album'
@ -1044,16 +1028,22 @@ import_cmd.parser.add_option(
)
import_cmd.parser.add_option(
u'-S', u'--search-id', dest='search_ids', action='append',
metavar='BACKEND_ID',
metavar='ID',
help=u'restrict matching to a specific metadata backend ID'
)
import_cmd.parser.add_option(
u'--set', dest='set_fields', action='callback',
callback=_store_dict,
metavar='FIELD=VALUE',
help=u'set the given fields to the supplied values'
)
import_cmd.func = import_func
default_commands.append(import_cmd)
# list: Query and show library contents.
def list_items(lib, query, album, fmt=''):
def list_items(lib, query, album, fmt=u''):
"""Print out items in lib matching query. If album, then search for
albums instead of single items.
"""
@ -1079,11 +1069,18 @@ default_commands.append(list_cmd)
# update: Update library contents according to on-disk tags.
def update_items(lib, query, album, move, pretend):
def update_items(lib, query, album, move, pretend, fields):
"""For all the items matched by the query, update the library to
reflect the item's embedded tags.
:param fields: The fields to be stored. If not specified, all fields will
be.
"""
with lib.transaction():
if move and fields is not None and 'path' not in fields:
# Special case: if an item needs to be moved, the path field has to
# be updated; otherwise the new path will not be reflected in the
# database.
fields.append('path')
items, _ = _do_query(lib, query, album)
# Walk through the items and pick up their changes.
@ -1122,24 +1119,25 @@ def update_items(lib, query, album, move, pretend):
item._dirty.discard(u'albumartist')
# Check for and display changes.
changed = ui.show_model_changes(item,
fields=library.Item._media_fields)
changed = ui.show_model_changes(
item,
fields=fields or library.Item._media_fields)
# Save changes.
if not pretend:
if changed:
# Move the item if it's in the library.
if move and lib.directory in ancestry(item.path):
item.move()
item.move(store=False)
item.store()
item.store(fields=fields)
affected_albums.add(item.album_id)
else:
# The file's mtime was different, but there were no
# changes to the metadata. Store the new mtime,
# which is set in the call to read(), so we don't
# check this again in the future.
item.store()
item.store(fields=fields)
# Skip album changes while pretending.
if pretend:
@ -1158,17 +1156,24 @@ def update_items(lib, query, album, move, pretend):
# Update album structure to reflect an item in it.
for key in library.Album.item_keys:
album[key] = first_item[key]
album.store()
album.store(fields=fields)
# Move album art (and any inconsistent items).
if move and lib.directory in ancestry(first_item.path):
log.debug(u'moving album {0}', album_id)
album.move()
# Manually moving and storing the album.
items = list(album.items())
for item in items:
item.move(store=False)
item.store(fields=fields)
album.move(store=False)
album.store(fields=fields)
def update_func(lib, opts, args):
update_items(lib, decargs(args), opts.album, ui.should_move(opts.move),
opts.pretend)
opts.pretend, opts.fields)
update_cmd = ui.Subcommand(
@ -1188,19 +1193,25 @@ update_cmd.parser.add_option(
u'-p', u'--pretend', action='store_true',
help=u"show all changes but do nothing"
)
update_cmd.parser.add_option(
u'-F', u'--field', default=None, action='append', dest='fields',
help=u'list of fields to update'
)
update_cmd.func = update_func
default_commands.append(update_cmd)
# remove: Remove items from library, delete files.
def remove_items(lib, query, album, delete):
def remove_items(lib, query, album, delete, force):
"""Remove items matching query from lib. If album, then match and
remove whole albums. If delete, also remove files from disk.
"""
# Get the matching items.
items, albums = _do_query(lib, query, album)
# Confirm file removal if not forcing removal.
if not force:
# Prepare confirmation with user.
print_()
if delete:
@ -1208,7 +1219,7 @@ def remove_items(lib, query, album, delete):
prompt = u'Really DELETE %i file%s (y/n)?' % \
(len(items), 's' if len(items) > 1 else '')
else:
fmt = ''
fmt = u''
prompt = u'Really remove %i item%s from the library (y/n)?' % \
(len(items), 's' if len(items) > 1 else '')
@ -1227,7 +1238,7 @@ def remove_items(lib, query, album, delete):
def remove_func(lib, opts, args):
remove_items(lib, decargs(args), opts.album, opts.delete)
remove_items(lib, decargs(args), opts.album, opts.delete, opts.force)
remove_cmd = ui.Subcommand(
@ -1237,6 +1248,10 @@ remove_cmd.parser.add_option(
u"-d", u"--delete", action="store_true",
help=u"also remove files from disk"
)
remove_cmd.parser.add_option(
u"-f", u"--force", action="store_true",
help=u"do not ask when removing items"
)
remove_cmd.parser.add_album_option()
remove_cmd.func = remove_func
default_commands.append(remove_cmd)
@ -1310,6 +1325,7 @@ default_commands.append(stats_cmd)
def show_version(lib, opts, args):
print_(u'beets version %s' % beets.__version__)
print_(u'Python version {}'.format(python_version()))
# Show plugins.
names = sorted(p.name for p in plugins.find_plugins())
if names:
@ -1454,7 +1470,8 @@ default_commands.append(modify_cmd)
# move: Move/copy files to the library or a new base directory.
def move_items(lib, dest, query, copy, album, pretend, confirm=False):
def move_items(lib, dest, query, copy, album, pretend, confirm=False,
export=False):
"""Moves or copies items to a new base directory, given by dest. If
dest is None, then the library's base directory is used, making the
command "consolidate" files.
@ -1467,6 +1484,7 @@ def move_items(lib, dest, query, copy, album, pretend, confirm=False):
isalbummoved = lambda album: any(isitemmoved(i) for i in album.items())
objs = [o for o in objs if (isalbummoved if album else isitemmoved)(o)]
copy = copy or export # Exporting always copies.
action = u'Copying' if copy else u'Moving'
act = u'copy' if copy else u'move'
entity = u'album' if album else u'item'
@ -1492,8 +1510,16 @@ def move_items(lib, dest, query, copy, album, pretend, confirm=False):
for obj in objs:
log.debug(u'moving: {0}', util.displayable_path(obj.path))
obj.move(copy, basedir=dest)
obj.store()
if export:
# Copy without affecting the database.
obj.move(operation=MoveOperation.COPY, basedir=dest,
store=False)
else:
# Ordinary move/copy: store the new path.
if copy:
obj.move(operation=MoveOperation.COPY, basedir=dest)
else:
obj.move(operation=MoveOperation.MOVE, basedir=dest)
def move_func(lib, opts, args):
@ -1504,7 +1530,7 @@ def move_func(lib, opts, args):
raise ui.UserError(u'no such directory: %s' % dest)
move_items(lib, dest, decargs(args), opts.copy, opts.album, opts.pretend,
opts.timid)
opts.timid, opts.export)
move_cmd = ui.Subcommand(
@ -1526,6 +1552,10 @@ move_cmd.parser.add_option(
u'-t', u'--timid', dest='timid', action='store_true',
help=u'always confirm all actions'
)
move_cmd.parser.add_option(
u'-e', u'--export', default=False, action='store_true',
help=u'copy without changing the database path'
)
move_cmd.parser.add_album_option()
move_cmd.func = move_func
default_commands.append(move_cmd)
@ -1601,7 +1631,7 @@ def config_func(lib, opts, args):
filenames.insert(0, user_path)
for filename in filenames:
print_(filename)
print_(displayable_path(filename))
# Open in editor.
elif opts.edit:
@ -1609,7 +1639,8 @@ def config_func(lib, opts, args):
# Dump configuration.
else:
print_(config.dump(full=opts.defaults, redact=opts.redact))
config_out = config.dump(full=opts.defaults, redact=opts.redact)
print_(util.text_string(config_out))
def config_edit():
@ -1655,17 +1686,19 @@ default_commands.append(config_cmd)
def print_completion(*args):
for line in completion_script(default_commands + plugins.commands()):
print_(line, end='')
print_(line, end=u'')
if not any(map(os.path.isfile, BASH_COMPLETION_PATHS)):
log.warn(u'Warning: Unable to find the bash-completion package. '
log.warning(u'Warning: Unable to find the bash-completion package. '
u'Command line completion might not work.')
BASH_COMPLETION_PATHS = map(syspath, [
u'/etc/bash_completion',
u'/usr/share/bash-completion/bash_completion',
u'/usr/share/local/bash-completion/bash_completion',
u'/opt/local/share/bash-completion/bash_completion', # SmartOS
u'/usr/local/etc/bash_completion', # Homebrew
u'/usr/local/share/bash-completion/bash_completion',
# SmartOS
u'/opt/local/share/bash-completion/bash_completion',
# Homebrew (before bash-completion2)
u'/usr/local/etc/bash_completion',
])
@ -1677,7 +1710,7 @@ def completion_script(commands):
"""
base_script = os.path.join(_package_path('beets.ui'), 'completion_base.sh')
with open(base_script, 'r') as base_script:
yield base_script.read()
yield util.text_string(base_script.read())
options = {}
aliases = {}
@ -1692,12 +1725,12 @@ def completion_script(commands):
if re.match(r'^\w+$', alias):
aliases[alias] = name
options[name] = {'flags': [], 'opts': []}
options[name] = {u'flags': [], u'opts': []}
for opts in cmd.parser._get_all_options()[1:]:
if opts.action in ('store_true', 'store_false'):
option_type = 'flags'
option_type = u'flags'
else:
option_type = 'opts'
option_type = u'opts'
options[name][option_type].extend(
opts._short_opts + opts._long_opts
@ -1705,14 +1738,14 @@ def completion_script(commands):
# Add global options
options['_global'] = {
'flags': [u'-v', u'--verbose'],
'opts': u'-l --library -c --config -d --directory -h --help'.split(
u' ')
u'flags': [u'-v', u'--verbose'],
u'opts':
u'-l --library -c --config -d --directory -h --help'.split(u' ')
}
# Add flags common to all commands
options['_common'] = {
'flags': [u'-h', u'--help']
u'flags': [u'-h', u'--help']
}
# Start generating the script
@ -1725,21 +1758,24 @@ def completion_script(commands):
# Command aliases
yield u" local aliases='%s'\n" % ' '.join(aliases.keys())
for alias, cmd in aliases.items():
yield u" local alias__%s=%s\n" % (alias, cmd)
yield u" local alias__%s=%s\n" % (alias.replace('-', '_'), cmd)
yield u'\n'
# Fields
yield u" fields='%s'\n" % ' '.join(
set(library.Item._fields.keys() + library.Album._fields.keys())
set(
list(library.Item._fields.keys()) +
list(library.Album._fields.keys())
)
)
# Command options
for cmd, opts in options.items():
for option_type, option_list in opts.items():
if option_list:
option_list = ' '.join(option_list)
option_list = u' '.join(option_list)
yield u" local %s__%s='%s'\n" % (
option_type, cmd, option_list)
option_type, cmd.replace('-', '_'), option_list)
yield u' _beet_dispatch\n'
yield u'}\n'

View file

@ -70,7 +70,7 @@ _beet_dispatch() {
# Replace command shortcuts
if [[ -n $cmd ]] && _list_include_item "$aliases" "$cmd"; then
eval "cmd=\$alias__$cmd"
eval "cmd=\$alias__${cmd//-/_}"
fi
case $cmd in
@ -94,8 +94,8 @@ _beet_dispatch() {
_beet_complete() {
if [[ $cur == -* ]]; then
local opts flags completions
eval "opts=\$opts__$cmd"
eval "flags=\$flags__$cmd"
eval "opts=\$opts__${cmd//-/_}"
eval "flags=\$flags__${cmd//-/_}"
completions="${flags___common} ${opts} ${flags}"
COMPREPLY+=( $(compgen -W "$completions" -- $cur) )
else
@ -129,7 +129,7 @@ _beet_complete_global() {
COMPREPLY+=( $(compgen -W "$completions" -- $cur) )
elif [[ -n $cur ]] && _list_include_item "$aliases" "$cur"; then
local cmd
eval "cmd=\$alias__$cur"
eval "cmd=\$alias__${cur//-/_}"
COMPREPLY+=( "$cmd" )
else
COMPREPLY+=( $(compgen -W "$commands" -- $cur) )
@ -138,7 +138,7 @@ _beet_complete_global() {
_beet_complete_query() {
local opts
eval "opts=\$opts__$cmd"
eval "opts=\$opts__${cmd//-/_}"
if [[ $cur == -* ]] || _list_include_item "$opts" "$prev"; then
_beet_complete

View file

@ -18,6 +18,8 @@
from __future__ import division, absolute_import, print_function
import os
import sys
import errno
import locale
import re
import shutil
import fnmatch
@ -27,10 +29,14 @@ import subprocess
import platform
import shlex
from beets.util import hidden
import six
from unidecode import unidecode
from enum import Enum
MAX_FILENAME_LENGTH = 200
WINDOWS_MAGIC_PREFIX = u'\\\\?\\'
SNI_SUPPORTED = sys.version_info >= (2, 7, 9)
class HumanReadableException(Exception):
@ -65,14 +71,14 @@ class HumanReadableException(Exception):
def _reasonstr(self):
"""Get the reason as a string."""
if isinstance(self.reason, unicode):
if isinstance(self.reason, six.text_type):
return self.reason
elif isinstance(self.reason, basestring): # Byte string.
return self.reason.decode('utf8', 'ignore')
elif isinstance(self.reason, bytes):
return self.reason.decode('utf-8', 'ignore')
elif hasattr(self.reason, 'strerror'): # i.e., EnvironmentError
return self.reason.strerror
else:
return u'"{0}"'.format(unicode(self.reason))
return u'"{0}"'.format(six.text_type(self.reason))
def get_message(self):
"""Create the human-readable description of the error, sans
@ -119,6 +125,15 @@ class FilesystemError(HumanReadableException):
return u'{0} {1}'.format(self._reasonstr(), clause)
class MoveOperation(Enum):
    """The file operations that e.g. various move functions can carry out.
    """
    # Rename the file to the new path (see `move()`).
    MOVE = 0
    # Duplicate the file at the new path, keeping the original (see `copy()`).
    COPY = 1
    # Create a symbolic link at the new path (see `link()`).
    LINK = 2
    # Create a hard link at the new path (see `hardlink()`).
    HARDLINK = 3
def normpath(path):
"""Provide the canonical form of the path suitable for storing in
the database.
@ -158,15 +173,16 @@ def sorted_walk(path, ignore=(), ignore_hidden=False, logger=None):
pattern in `ignore` are skipped. If `logger` is provided, then
warning messages are logged there when a directory cannot be listed.
"""
# Make sure the path isn't a Unicode string.
# Make sure the paths aren't Unicode strings.
path = bytestring_path(path)
ignore = [bytestring_path(i) for i in ignore]
# Get all the directories and files at this level.
try:
contents = os.listdir(syspath(path))
except OSError as exc:
if logger:
logger.warn(u'could not list directory {0}: {1}'.format(
logger.warning(u'could not list directory {0}: {1}'.format(
displayable_path(path), exc.strerror
))
return
@ -264,7 +280,9 @@ def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
if not os.path.exists(directory):
# Directory gone already.
continue
if fnmatch_all(os.listdir(directory), clutter):
clutter = [bytestring_path(c) for c in clutter]
match_paths = [bytestring_path(d) for d in os.listdir(directory)]
if fnmatch_all(match_paths, clutter):
# Directory contains only clutter (or nothing).
try:
shutil.rmtree(directory)
@ -298,6 +316,18 @@ def components(path):
return comps
def arg_encoding():
    """Get the encoding for command-line arguments (and other OS
    locale-sensitive strings).
    """
    try:
        encoding = locale.getdefaultlocale()[1]
    except ValueError:
        # An invalid locale environment variable raises ValueError; fall
        # back to UTF-8 rather than failing outright.
        encoding = None
    return encoding or 'utf-8'
def _fsencoding():
"""Get the system's filesystem encoding. On Windows, this is always
UTF-8 (not MBCS).
@ -309,7 +339,7 @@ def _fsencoding():
# for Windows paths, so the encoding is actually immaterial so
# we can avoid dealing with this nastiness. We arbitrarily
# choose UTF-8.
encoding = 'utf8'
encoding = 'utf-8'
return encoding
@ -327,11 +357,14 @@ def bytestring_path(path):
if os.path.__name__ == 'ntpath' and path.startswith(WINDOWS_MAGIC_PREFIX):
path = path[len(WINDOWS_MAGIC_PREFIX):]
# Try to encode with default encodings, but fall back to UTF8.
# Try to encode with default encodings, but fall back to utf-8.
try:
return path.encode(_fsencoding())
except (UnicodeError, LookupError):
return path.encode('utf8')
return path.encode('utf-8')
PATH_SEP = bytestring_path(os.sep)
def displayable_path(path, separator=u'; '):
@ -341,16 +374,16 @@ def displayable_path(path, separator=u'; '):
"""
if isinstance(path, (list, tuple)):
return separator.join(displayable_path(p) for p in path)
elif isinstance(path, unicode):
elif isinstance(path, six.text_type):
return path
elif not isinstance(path, bytes):
# A non-string object: just get its unicode representation.
return unicode(path)
return six.text_type(path)
try:
return path.decode(_fsencoding(), 'ignore')
except (UnicodeError, LookupError):
return path.decode('utf8', 'ignore')
return path.decode('utf-8', 'ignore')
def syspath(path, prefix=True):
@ -364,12 +397,12 @@ def syspath(path, prefix=True):
if os.path.__name__ != 'ntpath':
return path
if not isinstance(path, unicode):
if not isinstance(path, six.text_type):
# Beets currently represents Windows paths internally with UTF-8
# arbitrarily. But earlier versions used MBCS because it is
# reported as the FS encoding by Windows. Try both.
try:
path = path.decode('utf8')
path = path.decode('utf-8')
except UnicodeError:
# The encoding should always be MBCS, Windows' broken
# Unicode representation.
@ -389,6 +422,8 @@ def syspath(path, prefix=True):
def samefile(p1, p2):
    """Safer equality for paths."""
    # Byte-identical paths are trivially the same file; otherwise defer
    # to shutil's (private) stat-based comparison.
    return p1 == p2 or shutil._samefile(syspath(p1), syspath(p2))
@ -437,8 +472,7 @@ def move(path, dest, replace=False):
path = syspath(path)
dest = syspath(dest)
if os.path.exists(dest) and not replace:
raise FilesystemError(u'file exists', 'rename', (path, dest),
traceback.format_exc())
raise FilesystemError(u'file exists', 'rename', (path, dest))
# First, try renaming the file.
try:
@ -456,20 +490,49 @@ def move(path, dest, replace=False):
def link(path, dest, replace=False):
"""Create a symbolic link from path to `dest`. Raises an OSError if
`dest` already exists, unless `replace` is True. Does nothing if
`path` == `dest`."""
if (samefile(path, dest)):
`path` == `dest`.
"""
if samefile(path, dest):
return
path = syspath(path)
dest = syspath(dest)
if os.path.exists(dest) and not replace:
raise FilesystemError(u'file exists', 'rename', (path, dest),
traceback.format_exc())
if os.path.exists(syspath(dest)) and not replace:
raise FilesystemError(u'file exists', 'rename', (path, dest))
try:
os.symlink(path, dest)
except OSError:
raise FilesystemError(u'Operating system does not support symbolic '
u'links.', 'link', (path, dest),
os.symlink(syspath(path), syspath(dest))
except NotImplementedError:
# raised on python >= 3.2 and Windows versions before Vista
raise FilesystemError(u'OS does not support symbolic links.'
'link', (path, dest), traceback.format_exc())
except OSError as exc:
# TODO: Windows version checks can be removed for python 3
if hasattr('sys', 'getwindowsversion'):
if sys.getwindowsversion()[0] < 6: # is before Vista
exc = u'OS does not support symbolic links.'
raise FilesystemError(exc, 'link', (path, dest),
traceback.format_exc())
def hardlink(path, dest, replace=False):
    """Create a hard link from `path` to `dest`. Raises a
    `FilesystemError` if `dest` already exists (unless `replace` is
    True), if the OS does not support hard links, or if the link would
    cross devices. Does nothing if `path` == `dest`.
    """
    if samefile(path, dest):
        return

    if os.path.exists(syspath(dest)) and not replace:
        raise FilesystemError(u'file exists', 'rename', (path, dest))
    try:
        os.link(syspath(path), syspath(dest))
    except NotImplementedError:
        # Fixed: the original was missing a comma here, so the message
        # and the 'link' verb were concatenated into one argument and
        # FilesystemError lost its verb/paths alignment.
        raise FilesystemError(u'OS does not support hard links.',
                              'link', (path, dest), traceback.format_exc())
    except OSError as exc:
        if exc.errno == errno.EXDEV:
            # Hard links cannot span devices/filesystems.
            raise FilesystemError(u'Cannot hard link across devices.',
                                  'link', (path, dest),
                                  traceback.format_exc())
        else:
            raise FilesystemError(exc, 'link', (path, dest),
                                  traceback.format_exc())
@ -490,7 +553,8 @@ def unique_path(path):
num = 0
while True:
num += 1
new_path = b'%s.%i%s' % (base, num, ext)
suffix = u'.{}'.format(num).encode() + ext
new_path = base + suffix
if not os.path.exists(new_path):
return new_path
@ -594,7 +658,7 @@ def legalize_path(path, replacements, length, extension, fragment):
if fragment:
# Outputting Unicode.
extension = extension.decode('utf8', 'ignore')
extension = extension.decode('utf-8', 'ignore')
first_stage_path, _ = _legalize_stage(
path, replacements, length, extension, fragment
@ -618,6 +682,24 @@ def legalize_path(path, replacements, length, extension, fragment):
return second_stage_path, retruncated
def py3_path(path):
    """Convert a bytestring path to Unicode on Python 3 only. On Python
    2, return the bytestring path unchanged.

    This helps deal with APIs on Python 3 that *only* accept Unicode
    (i.e., `str` objects). I philosophically disagree with this
    decision, because paths are sadly bytes on Unix, but that's the way
    it is. So this function helps us "smuggle" the true bytes data
    through APIs that took Python 3's Unicode mandate too seriously.
    """
    if isinstance(path, bytes):
        # Bytes pass through on Python 2; Python 3 decodes them with the
        # filesystem encoding (surrogateescape).
        return path if six.PY2 else os.fsdecode(path)
    assert isinstance(path, six.text_type)
    return path
def str2bool(value):
    """Returns a boolean reflecting a human-entered string."""
    affirmatives = (u'yes', u'1', u'true', u't', u'y')
    return value.lower() in affirmatives
@ -627,14 +709,32 @@ def as_string(value):
"""Convert a value to a Unicode object for matching with a query.
None becomes the empty string. Bytestrings are silently decoded.
"""
if six.PY2:
buffer_types = buffer, memoryview # noqa: F821
else:
buffer_types = memoryview
if value is None:
return u''
elif isinstance(value, buffer):
return bytes(value).decode('utf8', 'ignore')
elif isinstance(value, buffer_types):
return bytes(value).decode('utf-8', 'ignore')
elif isinstance(value, bytes):
return value.decode('utf8', 'ignore')
return value.decode('utf-8', 'ignore')
else:
return unicode(value)
return six.text_type(value)
def text_string(value, encoding='utf-8'):
    """Convert a string, which can either be bytes or unicode, to
    unicode.

    Text (unicode) is left untouched; bytes are decoded. This is useful
    to convert from a "native string" (bytes on Python 2, str on Python
    3) to a consistently unicode value.
    """
    return value.decode(encoding) if isinstance(value, bytes) else value
def plurality(objs):
@ -661,7 +761,7 @@ def cpu_count():
num = 0
elif sys.platform == 'darwin':
try:
num = int(command_output([b'/usr/sbin/sysctl', b'-n', b'hw.ncpu']))
num = int(command_output(['/usr/sbin/sysctl', '-n', 'hw.ncpu']))
except (ValueError, OSError, subprocess.CalledProcessError):
num = 0
else:
@ -675,10 +775,28 @@ def cpu_count():
return 1
def convert_command_args(args):
    """Convert command arguments to bytestrings on Python 2 and
    surrogate-escaped strings on Python 3."""
    assert isinstance(args, list)

    if six.PY2:
        # Encode any Unicode arguments down to bytes.
        return [a.encode(arg_encoding()) if isinstance(a, six.text_type)
                else a
                for a in args]
    # Decode any byte arguments up to (surrogate-escaped) text.
    return [a.decode(arg_encoding(), 'surrogateescape')
            if isinstance(a, bytes) else a
            for a in args]
def command_output(cmd, shell=False):
"""Runs the command and returns its output after it has exited.
``cmd`` is a list of byte string arguments starting with the command names.
``cmd`` is a list of arguments starting with the command names. The
arguments are bytes on Unix and strings on Windows.
If ``shell`` is true, ``cmd`` is assumed to be a string and passed to a
shell to execute.
@ -689,10 +807,18 @@ def command_output(cmd, shell=False):
This replaces `subprocess.check_output` which can have problems if lots of
output is sent to stderr.
"""
cmd = convert_command_args(cmd)
try: # python >= 3.3
devnull = subprocess.DEVNULL
except AttributeError:
devnull = open(os.devnull, 'r+b')
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=devnull,
close_fds=platform.system() != 'Windows',
shell=shell
)
@ -700,7 +826,7 @@ def command_output(cmd, shell=False):
if proc.returncode:
raise subprocess.CalledProcessError(
returncode=proc.returncode,
cmd=b' '.join(cmd),
cmd=' '.join(cmd),
output=stdout + stderr,
)
return stdout
@ -756,15 +882,14 @@ def shlex_split(s):
Raise `ValueError` if the string is not a well-formed shell string.
This is a workaround for a bug in some versions of Python.
"""
if isinstance(s, bytes):
# Shlex works fine.
if not six.PY2 or isinstance(s, bytes): # Shlex works fine.
return shlex.split(s)
elif isinstance(s, unicode):
elif isinstance(s, six.text_type):
# Work around a Python bug.
# http://bugs.python.org/issue6988
bs = s.encode('utf8')
return [c.decode('utf8') for c in shlex.split(bs)]
bs = s.encode('utf-8')
return [c.decode('utf-8') for c in shlex.split(bs)]
else:
raise TypeError(u'shlex_split called with non-string')
@ -796,8 +921,8 @@ def _windows_long_path_name(short_path):
"""Use Windows' `GetLongPathNameW` via ctypes to get the canonical,
long path given a short filename.
"""
if not isinstance(short_path, unicode):
short_path = unicode(short_path)
if not isinstance(short_path, six.text_type):
short_path = short_path.decode(_fsencoding())
import ctypes
buf = ctypes.create_unicode_buffer(260)
@ -860,3 +985,27 @@ def raw_seconds_short(string):
raise ValueError(u'String not in M:SS format')
minutes, seconds = map(int, match.groups())
return float(minutes * 60 + seconds)
def asciify_path(path, sep_replace):
    """Decodes all unicode characters in a path into ASCII equivalents.

    Substitutions are provided by the unidecode module. Path separators
    in the input are preserved.

    Keyword arguments:
    path -- The path to be asciified.
    sep_replace -- the string to be used to replace extraneous path
                   separators.
    """
    # If this platform has an os.altsep, normalize it to os.sep so the
    # split below sees every separator.
    if os.altsep:
        path = path.replace(os.altsep, os.sep)
    path_components = path.split(os.sep)
    for index, item in enumerate(path_components):
        # unidecode() may itself emit separator characters (e.g. when
        # transliterating fullwidth slashes), so strip both os.sep and
        # os.altsep from each component. Fixed: the original second
        # assignment overwrote the os.sep replacement instead of
        # chaining it.
        item = unidecode(item).replace(os.sep, sep_replace)
        if os.altsep:
            item = item.replace(os.altsep, sep_replace)
        path_components[index] = item
    return os.sep.join(path_components)

View file

@ -18,20 +18,23 @@ public resizing proxy if neither is available.
"""
from __future__ import division, absolute_import, print_function
import urllib
import subprocess
import os
import re
from tempfile import NamedTemporaryFile
from six.moves.urllib.parse import urlencode
from beets import logging
from beets import util
import six
# Resizing methods
PIL = 1
IMAGEMAGICK = 2
WEBPROXY = 3
if util.SNI_SUPPORTED:
PROXY_URL = 'https://images.weserv.nl/'
else:
PROXY_URL = 'http://images.weserv.nl/'
log = logging.getLogger('beets')
@ -41,9 +44,9 @@ def resize_url(url, maxwidth):
"""Return a proxied image URL that resizes the original image to
maxwidth (preserving aspect ratio).
"""
return '{0}?{1}'.format(PROXY_URL, urllib.urlencode({
return '{0}?{1}'.format(PROXY_URL, urlencode({
'url': url.replace('http://', ''),
'w': bytes(maxwidth),
'w': maxwidth,
}))
@ -52,8 +55,8 @@ def temp_file_for(path):
specified path.
"""
ext = os.path.splitext(path)[1]
with NamedTemporaryFile(suffix=ext, delete=False) as f:
return f.name
with NamedTemporaryFile(suffix=util.py3_path(ext), delete=False) as f:
return util.bytestring_path(f.name)
def pil_resize(maxwidth, path_in, path_out=None):
@ -85,18 +88,17 @@ def im_resize(maxwidth, path_in, path_out=None):
log.debug(u'artresizer: ImageMagick resizing {0} to {1}',
util.displayable_path(path_in), util.displayable_path(path_out))
# "-resize widthxheight>" shrinks images with dimension(s) larger
# than the corresponding width and/or height dimension(s). The >
# "only shrink" flag is prefixed by ^ escape char for Windows
# compatibility.
# "-resize WIDTHx>" shrinks images with the width larger
# than the given width while maintaining the aspect ratio
# with regards to the height.
try:
util.command_output([
b'convert', util.syspath(path_in, prefix=False),
b'-resize', b'{0}x^>'.format(maxwidth),
'convert', util.syspath(path_in, prefix=False),
'-resize', '{0}x>'.format(maxwidth),
util.syspath(path_out, prefix=False),
])
except subprocess.CalledProcessError:
log.warn(u'artresizer: IM convert failed for {0}',
log.warning(u'artresizer: IM convert failed for {0}',
util.displayable_path(path_in))
return path_in
return path_out
@ -119,12 +121,12 @@ def pil_getsize(path_in):
def im_getsize(path_in):
cmd = [b'identify', b'-format', b'%w %h',
cmd = ['identify', '-format', '%w %h',
util.syspath(path_in, prefix=False)]
try:
out = util.command_output(cmd)
except subprocess.CalledProcessError as exc:
log.warn(u'ImageMagick size query failed')
log.warning(u'ImageMagick size query failed')
log.debug(
u'`convert` exited with (status {}) when '
u'getting size with command {}:\n{}',
@ -134,7 +136,7 @@ def im_getsize(path_in):
try:
return tuple(map(int, out.split(b' ')))
except IndexError:
log.warn(u'Could not understand IM output: {0!r}', out)
log.warning(u'Could not understand IM output: {0!r}', out)
BACKEND_GET_SIZE = {
@ -149,21 +151,20 @@ class Shareable(type):
lazily-created shared instance of ``MyClass`` while calling
``MyClass()`` to construct a new object works as usual.
"""
def __init__(self, name, bases, dict):
super(Shareable, self).__init__(name, bases, dict)
self._instance = None
def __init__(cls, name, bases, dict):
super(Shareable, cls).__init__(name, bases, dict)
cls._instance = None
@property
def shared(self):
if self._instance is None:
self._instance = self()
return self._instance
def shared(cls):
if cls._instance is None:
cls._instance = cls()
return cls._instance
class ArtResizer(object):
class ArtResizer(six.with_metaclass(Shareable, object)):
"""A singleton class that performs image resizes.
"""
__metaclass__ = Shareable
def __init__(self):
"""Create a resizer object with an inferred method.
@ -231,12 +232,13 @@ class ArtResizer(object):
def get_im_version():
"""Return Image Magick version or None if it is unavailable
Try invoking ImageMagick's "convert"."""
Try invoking ImageMagick's "convert".
"""
try:
out = util.command_output([b'identify', b'--version'])
out = util.command_output(['convert', '--version'])
if 'imagemagick' in out.lower():
pattern = r".+ (\d+)\.(\d+)\.(\d+).*"
if b'imagemagick' in out.lower():
pattern = br".+ (\d+)\.(\d+)\.(\d+).*"
match = re.search(pattern, out)
if match:
return (int(match.group(1)),
@ -244,7 +246,8 @@ def get_im_version():
int(match.group(3)))
return (0,)
except (subprocess.CalledProcessError, OSError):
except (subprocess.CalledProcessError, OSError) as exc:
log.debug(u'ImageMagick check `convert --version` failed: {}', exc)
return None

View file

@ -9,6 +9,7 @@ Bluelet: easy concurrency without all the messy parallelism.
"""
from __future__ import division, absolute_import, print_function
import six
import socket
import select
import sys
@ -19,20 +20,6 @@ import time
import collections
# A little bit of "six" (Python 2/3 compatibility): cope with PEP 3109 syntax
# changes.
PY3 = sys.version_info[0] == 3
if PY3:
def _reraise(typ, exc, tb):
raise exc.with_traceback(tb)
else:
exec("""
def _reraise(typ, exc, tb):
raise typ, exc, tb
""")
# Basic events used for thread scheduling.
class Event(object):
@ -214,7 +201,7 @@ class ThreadException(Exception):
self.exc_info = exc_info
def reraise(self):
_reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
six.reraise(self.exc_info[0], self.exc_info[1], self.exc_info[2])
SUSPENDED = Event() # Special sentinel placeholder for suspended threads.
@ -282,7 +269,7 @@ def run(root_coro):
except StopIteration:
# Thread is done.
complete_thread(coro, None)
except:
except BaseException:
# Thread raised some other exception.
del threads[coro]
raise ThreadException(coro, sys.exc_info())
@ -379,7 +366,7 @@ def run(root_coro):
exit_te = te
break
except:
except BaseException:
# For instance, KeyboardInterrupt during select(). Raise
# into root thread and terminate others.
threads = {root_coro: ExceptionEvent(sys.exc_info())}

View file

@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
# This file is part of Confit.
# This file is part of Confuse.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
@ -24,10 +24,7 @@ import sys
import yaml
import collections
import re
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
UNIX_DIR_VAR = 'XDG_CONFIG_HOME'
UNIX_DIR_FALLBACK = '~/.config'
@ -47,9 +44,9 @@ REDACTED_TOMBSTONE = 'REDACTED'
# Utilities.
PY3 = sys.version_info[0] == 3
STRING = str if PY3 else unicode
BASESTRING = str if PY3 else basestring
NUMERIC_TYPES = (int, float) if PY3 else (int, float, long)
STRING = str if PY3 else unicode # noqa: F821
BASESTRING = str if PY3 else basestring # noqa: F821
NUMERIC_TYPES = (int, float) if PY3 else (int, float, long) # noqa: F821
def iter_first(sequence):
@ -248,10 +245,15 @@ class ConfigView(object):
def set_args(self, namespace):
"""Overlay parsed command-line arguments, generated by a library
like argparse or optparse, onto this view's value.
like argparse or optparse, onto this view's value. ``namespace``
can be a ``dict`` or namespace object.
"""
args = {}
for key, value in namespace.__dict__.items():
if isinstance(namespace, dict):
items = namespace.items()
else:
items = namespace.__dict__.items()
for key, value in items:
if value is not None: # Avoid unset options.
args[key] = value
self.set(args)
@ -386,19 +388,42 @@ class ConfigView(object):
"""
return as_template(template).value(self, template)
# Old validation methods (deprecated).
# Shortcuts for common templates.
def as_filename(self):
"""Get the value as a path. Equivalent to `get(Filename())`.
"""
return self.get(Filename())
def as_choice(self, choices):
"""Get the value from a list of choices. Equivalent to
`get(Choice(choices))`.
"""
return self.get(Choice(choices))
def as_number(self):
"""Get the value as any number type: int or float. Equivalent to
`get(Number())`.
"""
return self.get(Number())
def as_str_seq(self):
return self.get(StrSeq())
def as_str_seq(self, split=True):
"""Get the value as a sequence of strings. Equivalent to
`get(StrSeq())`.
"""
return self.get(StrSeq(split=split))
def as_pairs(self, default_value=None):
"""Get the value as a sequence of pairs of two strings. Equivalent to
`get(Pairs())`.
"""
return self.get(Pairs(default_value=default_value))
def as_str(self):
"""Get the value as a (Unicode) string. Equivalent to
`get(unicode)` on Python 2 and `get(str)` on Python 3.
"""
return self.get(String())
# Redaction.
@ -484,10 +509,9 @@ class Subview(ConfigView):
self.name += '.'
if isinstance(self.key, int):
self.name += u'#{0}'.format(self.key)
elif isinstance(self.key, BASESTRING):
if isinstance(self.key, bytes):
self.name += self.key.decode('utf8')
else:
elif isinstance(self.key, bytes):
self.name += self.key.decode('utf-8')
elif isinstance(self.key, STRING):
self.name += self.key
else:
self.name += repr(self.key)
@ -650,7 +674,7 @@ def load_yaml(filename):
parsed, a ConfigReadError is raised.
"""
try:
with open(filename, 'r') as f:
with open(filename, 'rb') as f:
return yaml.load(f, Loader=Loader)
except (IOError, yaml.error.YAMLError) as exc:
raise ConfigReadError(filename, exc)
@ -890,9 +914,10 @@ class Configuration(RootView):
default_source = source
break
if default_source and default_source.filename:
with open(default_source.filename, 'r') as fp:
with open(default_source.filename, 'rb') as fp:
default_data = fp.read()
yaml_out = restore_yaml_comments(yaml_out, default_data)
yaml_out = restore_yaml_comments(yaml_out,
default_data.decode('utf8'))
return yaml_out
@ -953,7 +978,7 @@ should be raised when the value is missing.
class Template(object):
"""A value template for configuration fields.
The template works like a type and instructs Confit about how to
The template works like a type and instructs Confuse about how to
interpret a deserialized YAML value. This includes type conversions,
providing a default value, and validating for errors. For example, a
filepath type might expand tildes and check that the file exists.
@ -1223,30 +1248,77 @@ class StrSeq(Template):
super(StrSeq, self).__init__()
self.split = split
def _convert_value(self, x, view):
if isinstance(x, STRING):
return x
elif isinstance(x, bytes):
return x.decode('utf-8', 'ignore')
else:
self.fail(u'must be a list of strings', view, True)
def convert(self, value, view):
if isinstance(value, bytes):
value = value.decode('utf8', 'ignore')
value = value.decode('utf-8', 'ignore')
if isinstance(value, STRING):
if self.split:
return value.split()
value = value.split()
else:
value = [value]
else:
return [value]
try:
value = list(value)
except TypeError:
self.fail(u'must be a whitespace-separated string or a list',
view, True)
def convert(x):
if isinstance(x, STRING):
return x
elif isinstance(x, bytes):
return x.decode('utf8', 'ignore')
return [self._convert_value(v, view) for v in value]
class Pairs(StrSeq):
"""A template for ordered key-value pairs.
This can either be given with the same syntax as for `StrSeq` (i.e. without
values), or as a list of strings and/or single-element mappings such as::
- key: value
- [key, value]
- key
The result is a list of two-element tuples. If no value is provided, the
`default_value` will be returned as the second element.
"""
def __init__(self, default_value=None):
"""Create a new template.
`default` is the dictionary value returned for items that are not
a mapping, but a single string.
"""
super(Pairs, self).__init__(split=True)
self.default_value = default_value
def _convert_value(self, x, view):
try:
return (super(Pairs, self)._convert_value(x, view),
self.default_value)
except ConfigTypeError:
if isinstance(x, collections.Mapping):
if len(x) != 1:
self.fail(u'must be a single-element mapping', view, True)
k, v = iter_first(x.items())
elif isinstance(x, collections.Sequence):
if len(x) != 2:
self.fail(u'must be a two-element list', view, True)
k, v = x
else:
self.fail(u'must be a list of strings', view, True)
return list(map(convert, value))
# Is this even possible? -> Likely, if some !directive cause
# YAML to parse this to some custom type.
self.fail(u'must be a single string, mapping, or a list'
u'' + str(x),
view, True)
return (super(Pairs, self)._convert_value(k, view),
super(Pairs, self)._convert_value(v, view))
class Filename(Template):

View file

@ -33,8 +33,8 @@ import re
import ast
import dis
import types
from .confit import NUMERIC_TYPES
import sys
import six
SYMBOL_DELIM = u'$'
FUNC_DELIM = u'%'
@ -74,11 +74,11 @@ def ex_literal(val):
"""
if val is None:
return ast.Name('None', ast.Load())
elif isinstance(val, NUMERIC_TYPES):
elif isinstance(val, six.integer_types):
return ast.Num(val)
elif isinstance(val, bool):
return ast.Name(bytes(val), ast.Load())
elif isinstance(val, basestring):
elif isinstance(val, six.string_types):
return ast.Str(val)
raise TypeError(u'no literal for {0}'.format(type(val)))
@ -97,7 +97,7 @@ def ex_call(func, args):
function may be an expression or the name of a function. Each
argument may be an expression or a value to be used as a literal.
"""
if isinstance(func, basestring):
if isinstance(func, six.string_types):
func = ex_rvalue(func)
args = list(args)
@ -105,7 +105,10 @@ def ex_call(func, args):
if not isinstance(args[i], ast.expr):
args[i] = ex_literal(args[i])
if sys.version_info[:2] < (3, 5):
return ast.Call(func, args, [], None, None)
else:
return ast.Call(func, args, [])
def compile_func(arg_names, statements, name='_the_func', debug=False):
@ -113,16 +116,31 @@ def compile_func(arg_names, statements, name='_the_func', debug=False):
the resulting Python function. If `debug`, then print out the
bytecode of the compiled function.
"""
if six.PY2:
func_def = ast.FunctionDef(
name.encode('utf8'),
ast.arguments(
[ast.Name(n, ast.Param()) for n in arg_names],
None, None,
[ex_literal(None) for _ in arg_names],
name=name.encode('utf-8'),
args=ast.arguments(
args=[ast.Name(n, ast.Param()) for n in arg_names],
vararg=None,
kwarg=None,
defaults=[ex_literal(None) for _ in arg_names],
),
statements,
[],
body=statements,
decorator_list=[],
)
else:
func_def = ast.FunctionDef(
name=name,
args=ast.arguments(
args=[ast.arg(arg=n, annotation=None) for n in arg_names],
kwonlyargs=[],
kw_defaults=[],
defaults=[ex_literal(None) for _ in arg_names],
),
body=statements,
decorator_list=[],
)
mod = ast.Module([func_def])
ast.fix_missing_locations(mod)
@ -164,8 +182,12 @@ class Symbol(object):
def translate(self):
"""Compile the variable lookup."""
expr = ex_rvalue(VARIABLE_PREFIX + self.ident.encode('utf8'))
return [expr], set([self.ident.encode('utf8')]), set()
if six.PY2:
ident = self.ident.encode('utf-8')
else:
ident = self.ident
expr = ex_rvalue(VARIABLE_PREFIX + ident)
return [expr], set([ident]), set()
class Call(object):
@ -190,15 +212,19 @@ class Call(object):
except Exception as exc:
# Function raised exception! Maybe inlining the name of
# the exception will help debug.
return u'<%s>' % unicode(exc)
return unicode(out)
return u'<%s>' % six.text_type(exc)
return six.text_type(out)
else:
return self.original
def translate(self):
"""Compile the function call."""
varnames = set()
funcnames = set([self.ident.encode('utf8')])
if six.PY2:
ident = self.ident.encode('utf-8')
else:
ident = self.ident
funcnames = set([ident])
arg_exprs = []
for arg in self.args:
@ -213,14 +239,14 @@ class Call(object):
[ex_call(
'map',
[
ex_rvalue('unicode'),
ex_rvalue(six.text_type.__name__),
ast.List(subexprs, ast.Load()),
]
)],
))
subexpr_call = ex_call(
FUNCTION_PREFIX + self.ident.encode('utf8'),
FUNCTION_PREFIX + ident,
arg_exprs
)
return [subexpr_call], varnames, funcnames
@ -242,11 +268,11 @@ class Expression(object):
"""
out = []
for part in self.parts:
if isinstance(part, basestring):
if isinstance(part, six.string_types):
out.append(part)
else:
out.append(part.evaluate(env))
return u''.join(map(unicode, out))
return u''.join(map(six.text_type, out))
def translate(self):
"""Compile the expression to a list of Python AST expressions, a
@ -256,7 +282,7 @@ class Expression(object):
varnames = set()
funcnames = set()
for part in self.parts:
if isinstance(part, basestring):
if isinstance(part, six.string_types):
expressions.append(ex_literal(part))
else:
e, v, f = part.translate()
@ -285,16 +311,24 @@ class Parser(object):
replaced with a real, accepted parsing technique (PEG, parser
generator, etc.).
"""
def __init__(self, string):
def __init__(self, string, in_argument=False):
""" Create a new parser.
:param in_arguments: boolean that indicates the parser is to be
used for parsing function arguments, ie. considering commas
(`ARG_SEP`) a special character
"""
self.string = string
self.in_argument = in_argument
self.pos = 0
self.parts = []
# Common parsing resources.
special_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_OPEN, GROUP_CLOSE,
ARG_SEP, ESCAPE_CHAR)
special_char_re = re.compile(r'[%s]|$' %
ESCAPE_CHAR)
special_char_re = re.compile(r'[%s]|\Z' %
u''.join(re.escape(c) for c in special_chars))
escapable_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP)
terminator_chars = (GROUP_CLOSE,)
def parse_expression(self):
"""Parse a template expression starting at ``pos``. Resulting
@ -302,16 +336,29 @@ class Parser(object):
the ``parts`` field, a list. The ``pos`` field is updated to be
the next character after the expression.
"""
# Append comma (ARG_SEP) to the list of special characters only when
# parsing function arguments.
extra_special_chars = ()
special_char_re = self.special_char_re
if self.in_argument:
extra_special_chars = (ARG_SEP,)
special_char_re = re.compile(
r'[%s]|\Z' % u''.join(
re.escape(c) for c in
self.special_chars + extra_special_chars
)
)
text_parts = []
while self.pos < len(self.string):
char = self.string[self.pos]
if char not in self.special_chars:
if char not in self.special_chars + extra_special_chars:
# A non-special character. Skip to the next special
# character, treating the interstice as literal text.
next_pos = (
self.special_char_re.search(
special_char_re.search(
self.string[self.pos:]).start() + self.pos
)
text_parts.append(self.string[self.pos:next_pos])
@ -322,14 +369,14 @@ class Parser(object):
# The last character can never begin a structure, so we
# just interpret it as a literal character (unless it
# terminates the expression, as with , and }).
if char not in (GROUP_CLOSE, ARG_SEP):
if char not in self.terminator_chars + extra_special_chars:
text_parts.append(char)
self.pos += 1
break
next_char = self.string[self.pos + 1]
if char == ESCAPE_CHAR and next_char in \
(SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP):
if char == ESCAPE_CHAR and next_char in (self.escapable_chars +
extra_special_chars):
# An escaped special character ($$, $}, etc.). Note that
# ${ is not an escape sequence: this is ambiguous with
# the start of a symbol and it's not necessary (just
@ -349,7 +396,7 @@ class Parser(object):
elif char == FUNC_DELIM:
# Parse a function call.
self.parse_call()
elif char in (GROUP_CLOSE, ARG_SEP):
elif char in self.terminator_chars + extra_special_chars:
# Template terminated.
break
elif char == GROUP_OPEN:
@ -457,7 +504,7 @@ class Parser(object):
expressions = []
while self.pos < len(self.string):
subparser = Parser(self.string[self.pos:])
subparser = Parser(self.string[self.pos:], in_argument=True)
subparser.parse_expression()
# Extract and advance past the parsed expression.
@ -526,8 +573,9 @@ class Template(object):
"""
try:
res = self.compiled(values, functions)
except: # Handle any exceptions thrown by compiled version.
except Exception: # Handle any exceptions thrown by compiled version.
res = self.interpret(values, functions)
return res
def translate(self):
@ -563,7 +611,7 @@ if __name__ == '__main__':
import timeit
_tmpl = Template(u'foo $bar %baz{foozle $bar barzle} $bar')
_vars = {'bar': 'qux'}
_funcs = {'baz': unicode.upper}
_funcs = {'baz': six.text_type.upper}
interp_time = timeit.timeit('_tmpl.interpret(_vars, _funcs)',
'from __main__ import _tmpl, _vars, _funcs',
number=10000)

View file

@ -20,6 +20,7 @@ import os
import stat
import ctypes
import sys
import beets.util
def _is_hidden_osx(path):
@ -27,7 +28,7 @@ def _is_hidden_osx(path):
This uses os.lstat to work out if a file has the "hidden" flag.
"""
file_stat = os.lstat(path)
file_stat = os.lstat(beets.util.syspath(path))
if hasattr(file_stat, 'st_flags') and hasattr(stat, 'UF_HIDDEN'):
return bool(file_stat.st_flags & stat.UF_HIDDEN)
@ -45,7 +46,7 @@ def _is_hidden_win(path):
hidden_mask = 2
# Retrieve the attributes for the file.
attrs = ctypes.windll.kernel32.GetFileAttributesW(path)
attrs = ctypes.windll.kernel32.GetFileAttributesW(beets.util.syspath(path))
# Ensure we have valid attribues and compare them against the mask.
return attrs >= 0 and attrs & hidden_mask
@ -56,11 +57,12 @@ def _is_hidden_dot(path):
Files starting with a dot are seen as "hidden" files on Unix-based OSes.
"""
return os.path.basename(path).startswith('.')
return os.path.basename(path).startswith(b'.')
def is_hidden(path):
"""Return whether or not a file is hidden.
"""Return whether or not a file is hidden. `path` should be a
bytestring filename.
This method works differently depending on the platform it is called on.
@ -73,10 +75,6 @@ def is_hidden(path):
On any other operating systems (i.e. Linux), it uses `is_hidden_dot` to
work out if a file is hidden.
"""
# Convert the path to unicode if it is not already.
if not isinstance(path, unicode):
path = path.decode('utf-8')
# Run platform specific functions depending on the platform
if sys.platform == 'darwin':
return _is_hidden_osx(path) or _is_hidden_dot(path)

View file

@ -34,9 +34,10 @@ in place of any single coroutine.
from __future__ import division, absolute_import, print_function
import Queue
from six.moves import queue
from threading import Thread, Lock
import sys
import six
BUBBLE = '__PIPELINE_BUBBLE__'
POISON = '__PIPELINE_POISON__'
@ -63,7 +64,17 @@ def _invalidate_queue(q, val=None, sync=True):
q.mutex.acquire()
try:
q.maxsize = 0
# Originally, we set `maxsize` to 0 here, which is supposed to mean
# an unlimited queue size. However, there is a race condition since
# Python 3.2 when this attribute is changed while another thread is
# waiting in put()/get() due to a full/empty queue.
# Setting it to 2 is still hacky because Python does not give any
# guarantee what happens if Queue methods/attributes are overwritten
# when it is already in use. However, because of our dummy _put()
# and _get() methods, it provides a workaround to let the queue appear
# to be never empty or full.
# See issue https://github.com/beetbox/beets/issues/2078
q.maxsize = 2
q._qsize = _qsize
q._put = _put
q._get = _get
@ -75,13 +86,13 @@ def _invalidate_queue(q, val=None, sync=True):
q.mutex.release()
class CountedQueue(Queue.Queue):
class CountedQueue(queue.Queue):
"""A queue that keeps track of the number of threads that are
still feeding into it. The queue is poisoned when all threads are
finished with the queue.
"""
def __init__(self, maxsize=0):
Queue.Queue.__init__(self, maxsize)
queue.Queue.__init__(self, maxsize)
self.nthreads = 0
self.poisoned = False
@ -259,7 +270,7 @@ class FirstPipelineThread(PipelineThread):
return
self.out_queue.put(msg)
except:
except BaseException:
self.abort_all(sys.exc_info())
return
@ -307,7 +318,7 @@ class MiddlePipelineThread(PipelineThread):
return
self.out_queue.put(msg)
except:
except BaseException:
self.abort_all(sys.exc_info())
return
@ -346,7 +357,7 @@ class LastPipelineThread(PipelineThread):
# Send to consumer.
self.coro.send(msg)
except:
except BaseException:
self.abort_all(sys.exc_info())
return
@ -411,10 +422,10 @@ class Pipeline(object):
try:
# Using a timeout allows us to receive KeyboardInterrupt
# exceptions during the join().
while threads[-1].isAlive():
while threads[-1].is_alive():
threads[-1].join(1)
except:
except BaseException:
# Stop all the threads immediately.
for thread in threads:
thread.abort()
@ -431,7 +442,7 @@ class Pipeline(object):
exc_info = thread.exc_info
if exc_info:
# Make the exception appear as it was raised originally.
raise exc_info[0], exc_info[1], exc_info[2]
six.reraise(exc_info[0], exc_info[1], exc_info[2])
def pull(self):
"""Yield elements from the end of the pipeline. Runs the stages

167
libs/beetsplug/absubmit.py Normal file
View file

@ -0,0 +1,167 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Pieter Mulder.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Calculate acoustic information and submit to AcousticBrainz.
"""
from __future__ import division, absolute_import, print_function
import errno
import hashlib
import json
import os
import subprocess
import tempfile
from distutils.spawn import find_executable
import requests
from beets import plugins
from beets import util
from beets import ui
class ABSubmitError(Exception):
"""Raised when failing to analyse file with extractor."""
def call(args):
"""Execute the command and return its output.
Raise a AnalysisABSubmitError on failure.
"""
try:
return util.command_output(args)
except subprocess.CalledProcessError as e:
raise ABSubmitError(
u'{0} exited with status {1}'.format(args[0], e.returncode)
)
class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin):
def __init__(self):
super(AcousticBrainzSubmitPlugin, self).__init__()
self.config.add({'extractor': u''})
self.extractor = self.config['extractor'].as_str()
if self.extractor:
self.extractor = util.normpath(self.extractor)
# Expicit path to extractor
if not os.path.isfile(self.extractor):
raise ui.UserError(
u'Extractor command does not exist: {0}.'.
format(self.extractor)
)
else:
# Implicit path to extractor, search for it in path
self.extractor = 'streaming_extractor_music'
try:
call([self.extractor])
except OSError:
raise ui.UserError(
u'No extractor command found: please install the '
u'extractor binary from http://acousticbrainz.org/download'
)
except ABSubmitError:
# Extractor found, will exit with an error if not called with
# the correct amount of arguments.
pass
# Get the executable location on the system, which we need
# to calculate the SHA-1 hash.
self.extractor = find_executable(self.extractor)
# Calculate extractor hash.
self.extractor_sha = hashlib.sha1()
with open(self.extractor, 'rb') as extractor:
self.extractor_sha.update(extractor.read())
self.extractor_sha = self.extractor_sha.hexdigest()
base_url = 'https://acousticbrainz.org/api/v1/{mbid}/low-level'
def commands(self):
cmd = ui.Subcommand(
'absubmit',
help=u'calculate and submit AcousticBrainz analysis'
)
cmd.func = self.command
return [cmd]
def command(self, lib, opts, args):
# Get items from arguments
items = lib.items(ui.decargs(args))
for item in items:
analysis = self._get_analysis(item)
if analysis:
self._submit_data(item, analysis)
def _get_analysis(self, item):
mbid = item['mb_trackid']
# If file has no mbid skip it.
if not mbid:
self._log.info(u'Not analysing {}, missing '
u'musicbrainz track id.', item)
return None
# Temporary file to save extractor output to, extractor only works
# if an output file is given. Here we use a temporary file to copy
# the data into a python object and then remove the file from the
# system.
tmp_file, filename = tempfile.mkstemp(suffix='.json')
try:
# Close the file, so the extractor can overwrite it.
os.close(tmp_file)
try:
call([self.extractor, util.syspath(item.path), filename])
except ABSubmitError as e:
self._log.warning(
u'Failed to analyse {item} for AcousticBrainz: {error}',
item=item, error=e
)
return None
with open(filename, 'rb') as tmp_file:
analysis = json.load(tmp_file)
# Add the hash to the output.
analysis['metadata']['version']['essentia_build_sha'] = \
self.extractor_sha
return analysis
finally:
try:
os.remove(filename)
except OSError as e:
# ENOENT means file does not exist, just ignore this error.
if e.errno != errno.ENOENT:
raise
def _submit_data(self, item, data):
mbid = item['mb_trackid']
headers = {'Content-Type': 'application/json'}
response = requests.post(self.base_url.format(mbid=mbid),
json=data, headers=headers)
# Test that request was successful and raise an error on failure.
if response.status_code != 200:
try:
message = response.json()['message']
except (ValueError, KeyError) as e:
message = u'unable to get error message: {}'.format(e)
self._log.error(
u'Failed to submit AcousticBrainz analysis of {item}: '
u'{message}).', item=item, message=message
)
else:
self._log.debug(u'Successfully submitted AcousticBrainz analysis '
u'for {}.', item)

View file

@ -18,20 +18,101 @@
from __future__ import division, absolute_import, print_function
import requests
import operator
from collections import defaultdict
from beets import plugins, ui
from functools import reduce
ACOUSTIC_BASE = "https://acousticbrainz.org/"
LEVELS = ["/low-level", "/high-level"]
ABSCHEME = {
'highlevel': {
'danceability': {
'all': {
'danceable': 'danceable'
}
},
'gender': {
'value': 'gender'
},
'genre_rosamerica': {
'value': 'genre_rosamerica'
},
'mood_acoustic': {
'all': {
'acoustic': 'mood_acoustic'
}
},
'mood_aggressive': {
'all': {
'aggressive': 'mood_aggressive'
}
},
'mood_electronic': {
'all': {
'electronic': 'mood_electronic'
}
},
'mood_happy': {
'all': {
'happy': 'mood_happy'
}
},
'mood_party': {
'all': {
'party': 'mood_party'
}
},
'mood_relaxed': {
'all': {
'relaxed': 'mood_relaxed'
}
},
'mood_sad': {
'all': {
'sad': 'mood_sad'
}
},
'ismir04_rhythm': {
'value': 'rhythm'
},
'tonal_atonal': {
'all': {
'tonal': 'tonal'
}
},
'voice_instrumental': {
'value': 'voice_instrumental'
},
},
'lowlevel': {
'average_loudness': 'average_loudness'
},
'rhythm': {
'bpm': 'bpm'
},
'tonal': {
'chords_changes_rate': 'chords_changes_rate',
'chords_key': 'chords_key',
'chords_number_rate': 'chords_number_rate',
'chords_scale': 'chords_scale',
'key_key': ('initial_key', 0),
'key_scale': ('initial_key', 1),
'key_strength': 'key_strength'
}
}
class AcousticPlugin(plugins.BeetsPlugin):
def __init__(self):
super(AcousticPlugin, self).__init__()
self.config.add({'auto': True})
self.config.add({
'auto': True,
'force': False,
'tags': []
})
if self.config['auto']:
self.register_listener('import_task_files',
self.import_task_files)
@ -39,10 +120,16 @@ class AcousticPlugin(plugins.BeetsPlugin):
def commands(self):
cmd = ui.Subcommand('acousticbrainz',
help=u"fetch metadata from AcousticBrainz")
cmd.parser.add_option(
u'-f', u'--force', dest='force_refetch',
action='store_true', default=False,
help=u're-download data when already present'
)
def func(lib, opts, args):
items = lib.items(ui.decargs(args))
fetch_info(self._log, items, ui.should_write())
self._fetch_info(items, ui.should_write(),
opts.force_refetch or self.config['force'])
cmd.func = func
return [cmd]
@ -50,116 +137,169 @@ class AcousticPlugin(plugins.BeetsPlugin):
def import_task_files(self, session, task):
"""Function is called upon beet import.
"""
self._fetch_info(task.imported_items(), False, True)
items = task.imported_items()
fetch_info(self._log, items, False)
def _get_data(self, mbid):
data = {}
for url in _generate_urls(mbid):
self._log.debug(u'fetching URL: {}', url)
def fetch_info(log, items, write):
"""Get data from AcousticBrainz for the items.
"""
def get_value(*map_path):
try:
return reduce(operator.getitem, map_path, data)
except KeyError:
log.debug(u'Invalid Path: {}', map_path)
for item in items:
if item.mb_trackid:
log.info(u'getting data for: {}', item)
# Fetch the data from the AB API.
urls = [generate_url(item.mb_trackid, path) for path in LEVELS]
log.debug(u'fetching URLs: {}', ' '.join(urls))
try:
res = [requests.get(url) for url in urls]
res = requests.get(url)
except requests.RequestException as exc:
log.info(u'request error: {}', exc)
continue
self._log.info(u'request error: {}', exc)
return {}
# Check for missing tracks.
if any(r.status_code == 404 for r in res):
log.info(u'recording ID {} not found', item.mb_trackid)
continue
if res.status_code == 404:
self._log.info(u'recording ID {} not found', mbid)
return {}
# Parse the JSON response.
try:
data = res[0].json()
data.update(res[1].json())
data.update(res.json())
except ValueError:
log.debug(u'Invalid Response: {} & {}', [r.text for r in res])
self._log.debug(u'Invalid Response: {}', res.text)
return {}
# Get each field and assign it on the item.
item.danceable = get_value(
"highlevel", "danceability", "all", "danceable",
)
item.gender = get_value(
"highlevel", "gender", "value",
)
item.genre_rosamerica = get_value(
"highlevel", "genre_rosamerica", "value"
)
item.mood_acoustic = get_value(
"highlevel", "mood_acoustic", "all", "acoustic"
)
item.mood_aggressive = get_value(
"highlevel", "mood_aggressive", "all", "aggressive"
)
item.mood_electronic = get_value(
"highlevel", "mood_electronic", "all", "electronic"
)
item.mood_happy = get_value(
"highlevel", "mood_happy", "all", "happy"
)
item.mood_party = get_value(
"highlevel", "mood_party", "all", "party"
)
item.mood_relaxed = get_value(
"highlevel", "mood_relaxed", "all", "relaxed"
)
item.mood_sad = get_value(
"highlevel", "mood_sad", "all", "sad"
)
item.rhythm = get_value(
"highlevel", "ismir04_rhythm", "value"
)
item.tonal = get_value(
"highlevel", "tonal_atonal", "all", "tonal"
)
item.voice_instrumental = get_value(
"highlevel", "voice_instrumental", "value"
)
item.average_loudness = get_value(
"lowlevel", "average_loudness"
)
item.chords_changes_rate = get_value(
"tonal", "chords_changes_rate"
)
item.chords_key = get_value(
"tonal", "chords_key"
)
item.chords_number_rate = get_value(
"tonal", "chords_number_rate"
)
item.chords_scale = get_value(
"tonal", "chords_scale"
)
item.initial_key = '{} {}'.format(
get_value("tonal", "key_key"),
get_value("tonal", "key_scale")
)
item.key_strength = get_value(
"tonal", "key_strength"
)
return data
# Store the data.
    def _fetch_info(self, items, write, force):
        """Fetch additional information from AcousticBrainz for the `item`s.

        `force` re-fetches data even for items that already carry it;
        `write` additionally writes the updated tags to the media files.
        Only attributes listed in the `tags` config option are set (all
        of them when the option is empty).
        """
        tags = self.config['tags'].as_str_seq()
        for item in items:
            # If we're not forcing re-downloading for all tracks, check
            # whether the data is already present. We use one
            # representative field name to check for previously fetched
            # data.
            if not force:
                mood_str = item.get('mood_acoustic', u'')
                if mood_str:
                    self._log.info(u'data already present for: {}', item)
                    continue
            # We can only fetch data for tracks with MBIDs.
            if not item.mb_trackid:
                continue
            self._log.info(u'getting data for: {}', item)
            data = self._get_data(item.mb_trackid)
            if data:
                # Map the server response onto item attributes via ABSCHEME,
                # honoring the configured tag whitelist.
                for attr, val in self._map_data_to_scheme(data, ABSCHEME):
                    if not tags or attr in tags:
                        self._log.debug(u'attribute {} of {} set to {}',
                                        attr,
                                        item,
                                        val)
                        setattr(item, attr, val)
                    else:
                        self._log.debug(u'skipping attribute {} of {}'
                                        u' (value {}) due to config',
                                        attr,
                                        item,
                                        val)
                # Persist to the database; optionally write tags to disk.
                item.store()
                if write:
                    item.try_write()
def _map_data_to_scheme(self, data, scheme):
"""Given `data` as a structure of nested dictionaries, and `scheme` as a
structure of nested dictionaries , `yield` tuples `(attr, val)` where
`attr` and `val` are corresponding leaf nodes in `scheme` and `data`.
def generate_url(mbid, level):
"""Generates AcousticBrainz end point url for given MBID.
As its name indicates, `scheme` defines how the data is structured,
so this function tries to find leaf nodes in `data` that correspond
to the leafs nodes of `scheme`, and not the other way around.
Leaf nodes of `data` that do not exist in the `scheme` do not matter.
If a leaf node of `scheme` is not present in `data`,
no value is yielded for that attribute and a simple warning is issued.
Finally, to account for attributes of which the value is split between
several leaf nodes in `data`, leaf nodes of `scheme` can be tuples
`(attr, order)` where `attr` is the attribute to which the leaf node
belongs, and `order` is the place at which it should appear in the
value. The different `value`s belonging to the same `attr` are simply
joined with `' '`. This is hardcoded and not very flexible, but it gets
the job done.
For example:
>>> scheme = {
'key1': 'attribute',
'key group': {
'subkey1': 'subattribute',
'subkey2': ('composite attribute', 0)
},
'key2': ('composite attribute', 1)
}
>>> data = {
'key1': 'value',
'key group': {
'subkey1': 'subvalue',
'subkey2': 'part 1 of composite attr'
},
'key2': 'part 2'
}
>>> print(list(_map_data_to_scheme(data, scheme)))
[('subattribute', 'subvalue'),
('attribute', 'value'),
('composite attribute', 'part 1 of composite attr part 2')]
"""
return ACOUSTIC_BASE + mbid + level
# First, we traverse `scheme` and `data`, `yield`ing all the non
# composites attributes straight away and populating the dictionary
# `composites` with the composite attributes.
# When we are finished traversing `scheme`, `composites` should
# map each composite attribute to an ordered list of the values
# belonging to the attribute, for example:
# `composites = {'initial_key': ['B', 'minor']}`.
# The recursive traversal.
composites = defaultdict(list)
for attr, val in self._data_to_scheme_child(data,
scheme,
composites):
yield attr, val
# When composites has been populated, yield the composite attributes
# by joining their parts.
for composite_attr, value_parts in composites.items():
yield composite_attr, ' '.join(value_parts)
def _data_to_scheme_child(self, subdata, subscheme, composites):
"""The recursive business logic of :meth:`_map_data_to_scheme`:
Traverse two structures of nested dictionaries in parallel and `yield`
tuples of corresponding leaf nodes.
If a leaf node belongs to a composite attribute (is a `tuple`),
populate `composites` rather than yielding straight away.
All the child functions for a single traversal share the same
`composites` instance, which is passed along.
"""
for k, v in subscheme.items():
if k in subdata:
if type(v) == dict:
for attr, val in self._data_to_scheme_child(subdata[k],
v,
composites):
yield attr, val
elif type(v) == tuple:
composite_attribute, part_number = v
attribute_parts = composites[composite_attribute]
# Parts are not guaranteed to be inserted in order
while len(attribute_parts) <= part_number:
attribute_parts.append('')
attribute_parts[part_number] = subdata[k]
else:
yield v, subdata[k]
else:
self._log.warning(u'Acousticbrainz did not provide info'
u'about {}', k)
self._log.debug(u'Data {} could not be mapped to scheme {} '
u'because key {} was not found', subdata, v, k)
def _generate_urls(mbid):
    """Yield the AcousticBrainz endpoint URL for each level, for `mbid`."""
    prefix = ACOUSTIC_BASE + mbid
    for suffix in LEVELS:
        yield prefix + suffix

View file

@ -27,6 +27,24 @@ import shlex
import os
import errno
import sys
import six
class CheckerCommandException(Exception):
    """Signals that invoking an external checker program failed.

    Attributes:
        checker: Name of the checker executable (first element of the
            command line).
        path: File that was being validated (last element of the
            command line).
        errno: Error number from the checker execution error.
        msg: Human-readable message from the checker execution error.
    """

    def __init__(self, cmd, oserror):
        # `cmd` is the full argv: executable first, target file last.
        self.checker, self.path = cmd[0], cmd[-1]
        self.errno = oserror.errno
        self.msg = str(oserror)
class BadFiles(BeetsPlugin):
@ -42,11 +60,7 @@ class BadFiles(BeetsPlugin):
errors = 1
status = e.returncode
except OSError as e:
if e.errno == errno.ENOENT:
ui.print_(u"command not found: {}".format(cmd[0]))
sys.exit(1)
else:
raise
raise CheckerCommandException(cmd, e)
output = output.decode(sys.getfilesystemencoding())
return status, errors, [line for line in output.split("\n") if line]
@ -92,29 +106,47 @@ class BadFiles(BeetsPlugin):
ui.colorize('text_error', dpath)))
# Run the checker against the file if one is found
ext = os.path.splitext(item.path)[1][1:]
ext = os.path.splitext(item.path)[1][1:].decode('utf8', 'ignore')
checker = self.get_checker(ext)
if not checker:
self._log.error(u"no checker specified in the config for {}",
ext)
continue
path = item.path
if not isinstance(path, unicode):
if not isinstance(path, six.text_type):
path = item.path.decode(sys.getfilesystemencoding())
try:
status, errors, output = checker(path)
except CheckerCommandException as e:
if e.errno == errno.ENOENT:
self._log.error(
u"command not found: {} when validating file: {}",
e.checker,
e.path
)
else:
self._log.error(u"error invoking {}: {}", e.checker, e.msg)
continue
if status > 0:
ui.print_(u"{}: checker exited withs status {}"
ui.print_(u"{}: checker exited with status {}"
.format(ui.colorize('text_error', dpath), status))
for line in output:
ui.print_(" {}".format(displayable_path(line)))
ui.print_(u" {}".format(displayable_path(line)))
elif errors > 0:
ui.print_(u"{}: checker found {} errors or warnings"
.format(ui.colorize('text_warning', dpath), errors))
for line in output:
ui.print_(u" {}".format(displayable_path(line)))
else:
elif opts.verbose:
ui.print_(u"{}: ok".format(ui.colorize('text_success', dpath)))
    def commands(self):
        """Expose the `beet bad` subcommand for manually checking files."""
        bad_command = Subcommand('bad',
                                 help=u'check for corrupt or missing files')
        bad_command.parser.add_option(
            u'-v', u'--verbose',
            action='store_true', default=False, dest='verbose',
            help=u'view results for both the bad and uncorrupted files'
        )
        # The actual validation work happens in `check_bad`.
        bad_command.func = self.check_bad
        return [bad_command]

461
libs/beetsplug/beatport.py Normal file
View file

@ -0,0 +1,461 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Adds Beatport release and track search support to the autotagger
"""
from __future__ import division, absolute_import, print_function
import json
import re
import six
from datetime import datetime, timedelta
from requests_oauthlib import OAuth1Session
from requests_oauthlib.oauth1_session import (TokenRequestDenied, TokenMissing,
VerifierMissing)
import beets
import beets.ui
from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance
from beets.plugins import BeetsPlugin
from beets.util import confit
AUTH_ERRORS = (TokenRequestDenied, TokenMissing, VerifierMissing)
USER_AGENT = u'beets/{0} +http://beets.io/'.format(beets.__version__)
class BeatportAPIError(Exception):
    """Raised for any error while communicating with the Beatport API."""
class BeatportObject(object):
    """Common base for Beatport API result objects.

    Parses the fields shared by releases and tracks out of a raw API
    response dictionary. The `release_date`, `artists`, and `genres`
    attributes are only set when the corresponding key is present in
    `data`, so callers must be prepared for them to be absent.
    """

    def __init__(self, data):
        self.beatport_id = data['id']
        self.name = six.text_type(data['name'])
        if 'releaseDate' in data:
            self.release_date = datetime.strptime(
                data['releaseDate'], '%Y-%m-%d')
        if 'artists' in data:
            self.artists = [(artist['id'], six.text_type(artist['name']))
                            for artist in data['artists']]
        if 'genres' in data:
            self.genres = [six.text_type(genre['name'])
                           for genre in data['genres']]
class BeatportClient(object):
    """Minimal client for the Beatport OAuth1 catalog API.

    All requests are signed with OAuth1. On first use, obtain the
    resource-owner credentials via :py:meth:`get_authorize_url` and
    :py:meth:`get_access_token`.
    """

    _api_base = 'https://oauth-api.beatport.com'

    def __init__(self, c_key, c_secret, auth_key=None, auth_secret=None):
        """ Initiate the client with OAuth information.

        For the initial authentication with the backend `auth_key` and
        `auth_secret` can be `None`. Use `get_authorize_url` and
        `get_access_token` to obtain them for subsequent uses of the API.

        :param c_key: OAuth1 client key
        :param c_secret: OAuth1 client secret
        :param auth_key: OAuth1 resource owner key
        :param auth_secret: OAuth1 resource owner secret
        """
        self.api = OAuth1Session(
            client_key=c_key, client_secret=c_secret,
            resource_owner_key=auth_key,
            resource_owner_secret=auth_secret,
            callback_uri='oob')
        self.api.headers = {'User-Agent': USER_AGENT}

    def get_authorize_url(self):
        """ Generate the URL for the user to authorize the application.

        Retrieves a request token from the Beatport API and returns the
        corresponding authorization URL on their end that the user has
        to visit.

        This is the first step of the initial authorization process with the
        API. Once the user has visited the URL, call
        :py:meth:`get_access_token` with the displayed data to complete
        the process.

        :returns: Authorization URL for the user to visit
        :rtype: unicode
        """
        self.api.fetch_request_token(
            self._make_url('/identity/1/oauth/request-token'))
        return self.api.authorization_url(
            self._make_url('/identity/1/oauth/authorize'))

    def get_access_token(self, auth_data):
        """ Obtain the final access token and secret for the API.

        :param auth_data: URL-encoded authorization data as displayed at
                          the authorization url (obtained via
                          :py:meth:`get_authorize_url`) after signing in
        :type auth_data: unicode
        :returns: OAuth resource owner key and secret
        :rtype: (unicode, unicode) tuple
        """
        self.api.parse_authorization_response(
            "http://beets.io/auth?" + auth_data)
        access_data = self.api.fetch_access_token(
            self._make_url('/identity/1/oauth/access-token'))
        return access_data['oauth_token'], access_data['oauth_token_secret']

    def search(self, query, release_type='release', details=True):
        """ Perform a search of the Beatport catalogue.

        :param query: Query string
        :param release_type: Type of releases to search for, can be
                             'release' or 'track'
        :param details: Retrieve additional information about the
                        search results. Currently this will fetch
                        the tracklist for releases and do nothing for
                        tracks
        :returns: Search results
        :rtype: generator that yields
                py:class:`BeatportRelease` or
                :py:class:`BeatportTrack`
        """
        response = self._get('catalog/3/search',
                             query=query, perPage=5,
                             facets=['fieldType:{0}'.format(release_type)])
        for item in response:
            if release_type == 'release':
                if details:
                    release = self.get_release(item['id'])
                else:
                    release = BeatportRelease(item)
                yield release
            elif release_type == 'track':
                yield BeatportTrack(item)

    def get_release(self, beatport_id):
        """ Get information about a single release.

        :param beatport_id: Beatport ID of the release
        :returns: The matching release
        :rtype: :py:class:`BeatportRelease`
        """
        response = self._get('/catalog/3/releases', id=beatport_id)
        release = BeatportRelease(response[0])
        release.tracks = self.get_release_tracks(beatport_id)
        return release

    def get_release_tracks(self, beatport_id):
        """ Get all tracks for a given release.

        :param beatport_id: Beatport ID of the release
        :returns: Tracks in the matching release
        :rtype: list of :py:class:`BeatportTrack`
        """
        response = self._get('/catalog/3/tracks', releaseId=beatport_id,
                             perPage=100)
        return [BeatportTrack(t) for t in response]

    def get_track(self, beatport_id):
        """ Get information about a single track.

        :param beatport_id: Beatport ID of the track
        :returns: The matching track
        :rtype: :py:class:`BeatportTrack`
        """
        response = self._get('/catalog/3/tracks', id=beatport_id)
        return BeatportTrack(response[0])

    def _make_url(self, endpoint):
        """ Get complete URL for a given API endpoint. """
        if not endpoint.startswith('/'):
            endpoint = '/' + endpoint
        return self._api_base + endpoint

    def _get(self, endpoint, **kwargs):
        """ Perform a GET request on a given API endpoint.

        Automatically extracts result data from the response and converts HTTP
        exceptions into :py:class:`BeatportAPIError` objects.
        """
        try:
            response = self.api.get(self._make_url(endpoint), params=kwargs)
        except Exception as e:
            # BUGFIX: exceptions have no `.message` attribute on Python 3;
            # formatting the exception itself works on both 2 and 3.
            raise BeatportAPIError("Error connecting to Beatport API: {}"
                                   .format(e))
        if not response:
            # A `requests` response is falsy for 4xx/5xx status codes.
            # BUGFIX: the message was previously missing its closing quote.
            raise BeatportAPIError(
                "Error {0.status_code} for '{0.request.path_url}'"
                .format(response))
        return response.json()['results']
@six.python_2_unicode_compatible
class BeatportRelease(BeatportObject):
    """A Beatport release (album/EP) from the catalog API.

    In addition to the fields parsed by `BeatportObject`, the optional
    `catalog_number`, `label_name`, `category`, and `url` attributes now
    default to `None` so that formatting and downstream lookups never
    raise `AttributeError` for sparse API responses.
    """

    def __init__(self, data):
        BeatportObject.__init__(self, data)
        # Default optional fields so attribute access is always safe.
        self.catalog_number = data.get('catalogNumber')
        self.label_name = None
        if 'label' in data:
            self.label_name = data['label']['name']
        self.category = data.get('category')
        self.url = None
        if 'slug' in data:
            self.url = "http://beatport.com/release/{0}/{1}".format(
                data['slug'], data['id'])

    def __str__(self):
        # `artists` is only set by the base class when present in the
        # response; fall back to an empty list to keep __str__ total.
        artists = getattr(self, 'artists', [])
        if len(artists) < 4:
            artist_str = ", ".join(x[1] for x in artists)
        else:
            artist_str = "Various Artists"
        return u"<BeatportRelease: {0} - {1} ({2})>".format(
            artist_str,
            self.name,
            self.catalog_number,
        )

    def __repr__(self):
        # BUGFIX: `__repr__` must return the native `str` type -- bytes on
        # Python 2, text on Python 3. Returning encoded bytes on Python 3
        # raised TypeError.
        if six.PY2:
            return six.text_type(self).encode('utf-8')
        return six.text_type(self)
@six.python_2_unicode_compatible
class BeatportTrack(BeatportObject):
    """A single Beatport track from the catalog API.

    Optional fields (`title`, `mix_name`, `url`) now default to `None`
    so formatting and downstream lookups never raise `AttributeError`
    for sparse API responses. `length` is always a `timedelta` (zero
    when no usable duration is given).
    """

    def __init__(self, data):
        BeatportObject.__init__(self, data)
        self.title = None
        if 'title' in data:
            self.title = six.text_type(data['title'])
        self.mix_name = None
        if 'mixName' in data:
            self.mix_name = six.text_type(data['mixName'])
        self.length = timedelta(milliseconds=data.get('lengthMs', 0) or 0)
        if not self.length:
            # Fall back to the "M:SS" string form when no millisecond
            # duration was provided.
            try:
                minutes, seconds = data.get('length', '0:0').split(':')
                self.length = timedelta(minutes=int(minutes),
                                        seconds=int(seconds))
            except ValueError:
                # Unparseable duration: keep the zero-length default.
                pass
        self.url = None
        if 'slug' in data:
            self.url = "http://beatport.com/track/{0}/{1}".format(
                data['slug'], data['id'])
        self.track_number = data.get('trackNumber')

    def __str__(self):
        artists = getattr(self, 'artists', [])
        artist_str = ", ".join(x[1] for x in artists)
        return (u"<BeatportTrack: {0} - {1} ({2})>"
                .format(artist_str, self.name, self.mix_name))

    def __repr__(self):
        # BUGFIX: `__repr__` must return the native `str` type -- bytes on
        # Python 2, text on Python 3. Returning encoded bytes on Python 3
        # raised TypeError.
        if six.PY2:
            return six.text_type(self).encode('utf-8')
        return six.text_type(self)
class BeatportPlugin(BeetsPlugin):
    """Autotagger metadata source backed by the Beatport catalog.

    Registers an `import_begin` listener that lazily builds an
    authenticated `BeatportClient`, and contributes album/track
    candidates plus source-weight distances to the autotagger.
    """

    def __init__(self):
        super(BeatportPlugin, self).__init__()
        self.config.add({
            'apikey': '57713c3906af6f5def151b33601389176b37b429',
            'apisecret': 'b3fe08c93c80aefd749fe871a16cd2bb32e2b954',
            'tokenfile': 'beatport_token.json',
            'source_weight': 0.5,
        })
        # Keep the OAuth credentials out of `beet config` output.
        self.config['apikey'].redact = True
        self.config['apisecret'].redact = True
        self.client = None
        self.register_listener('import_begin', self.setup)

    def setup(self, session=None):
        """Build the authenticated API client at import time."""
        c_key = self.config['apikey'].as_str()
        c_secret = self.config['apisecret'].as_str()

        # Get the OAuth token from a file or log in.
        try:
            with open(self._tokenfile()) as f:
                tokendata = json.load(f)
        except IOError:
            # No token yet. Generate one.
            token, secret = self.authenticate(c_key, c_secret)
        else:
            token = tokendata['token']
            secret = tokendata['secret']

        self.client = BeatportClient(c_key, c_secret, token, secret)

    def authenticate(self, c_key, c_secret):
        """Run the interactive OAuth dance and persist the new token."""
        # Get the link for the OAuth page.
        auth_client = BeatportClient(c_key, c_secret)
        try:
            url = auth_client.get_authorize_url()
        except AUTH_ERRORS as e:
            self._log.debug(u'authentication error: {0}', e)
            raise beets.ui.UserError(u'communication with Beatport failed')

        beets.ui.print_(u"To authenticate with Beatport, visit:")
        beets.ui.print_(url)

        # Ask for the verifier data and validate it.
        data = beets.ui.input_(u"Enter the string displayed in your browser:")
        try:
            token, secret = auth_client.get_access_token(data)
        except AUTH_ERRORS as e:
            self._log.debug(u'authentication error: {0}', e)
            raise beets.ui.UserError(u'Beatport token request failed')

        # Save the token for later use.
        self._log.debug(u'Beatport token {0}, secret {1}', token, secret)
        with open(self._tokenfile(), 'w') as f:
            json.dump({'token': token, 'secret': secret}, f)

        return token, secret

    def _tokenfile(self):
        """Get the path to the JSON file for storing the OAuth token.
        """
        return self.config['tokenfile'].get(confit.Filename(in_app_dir=True))

    def album_distance(self, items, album_info, mapping):
        """Returns the beatport source weight and the maximum source weight
        for albums.
        """
        dist = Distance()
        if album_info.data_source == 'Beatport':
            dist.add('source', self.config['source_weight'].as_number())
        return dist

    def track_distance(self, item, track_info):
        """Returns the beatport source weight and the maximum source weight
        for individual tracks.
        """
        dist = Distance()
        if track_info.data_source == 'Beatport':
            dist.add('source', self.config['source_weight'].as_number())
        return dist

    def candidates(self, items, artist, release, va_likely):
        """Returns a list of AlbumInfo objects for beatport search results
        matching release and artist (if not various).
        """
        if va_likely:
            query = release
        else:
            query = '%s %s' % (artist, release)
        try:
            return self._get_releases(query)
        except BeatportAPIError as e:
            self._log.debug(u'API Error: {0} (query: {1})', e, query)
            return []

    def item_candidates(self, item, artist, title):
        """Returns a list of TrackInfo objects for beatport search results
        matching title and artist.
        """
        query = '%s %s' % (artist, title)
        try:
            return self._get_tracks(query)
        except BeatportAPIError as e:
            self._log.debug(u'API Error: {0} (query: {1})', e, query)
            return []

    def album_for_id(self, release_id):
        """Fetches a release by its Beatport ID and returns an AlbumInfo object
        or None if the release is not found.
        """
        self._log.debug(u'Searching for release {0}', release_id)
        # Accept either a bare numeric ID or a full release URL.
        match = re.search(r'(^|beatport\.com/release/.+/)(\d+)$', release_id)
        if not match:
            return None
        release = self.client.get_release(match.group(2))
        album = self._get_album_info(release)
        return album

    def track_for_id(self, track_id):
        """Fetches a track by its Beatport ID and returns a TrackInfo object
        or None if the track is not found.
        """
        self._log.debug(u'Searching for track {0}', track_id)
        # Accept either a bare numeric ID or a full track URL.
        match = re.search(r'(^|beatport\.com/track/.+/)(\d+)$', track_id)
        if not match:
            return None
        bp_track = self.client.get_track(match.group(2))
        track = self._get_track_info(bp_track)
        return track

    def _get_releases(self, query):
        """Returns a list of AlbumInfo objects for a beatport search query.
        """
        # Strip non-word characters from query. Things like "!" and "-" can
        # cause a query to return no results, even if they match the artist or
        # album title. Use `re.UNICODE` flag to avoid stripping non-english
        # word characters.
        query = re.sub(r'\W+', ' ', query, flags=re.UNICODE)
        # Strip medium information from query, Things like "CD1" and "disk 1"
        # can also negate an otherwise positive result.
        query = re.sub(r'\b(CD|disc)\s*\d+', '', query, flags=re.I)
        albums = [self._get_album_info(x)
                  for x in self.client.search(query)]
        return albums

    def _get_album_info(self, release):
        """Returns an AlbumInfo object for a Beatport Release object.
        """
        # More than three artists is treated as a various-artists release.
        va = len(release.artists) > 3
        artist, artist_id = self._get_artist(release.artists)
        if va:
            artist = u"Various Artists"
        tracks = [self._get_track_info(x) for x in release.tracks]

        return AlbumInfo(album=release.name, album_id=release.beatport_id,
                         artist=artist, artist_id=artist_id, tracks=tracks,
                         albumtype=release.category, va=va,
                         year=release.release_date.year,
                         month=release.release_date.month,
                         day=release.release_date.day,
                         label=release.label_name,
                         catalognum=release.catalog_number, media=u'Digital',
                         data_source=u'Beatport', data_url=release.url)

    def _get_track_info(self, track):
        """Returns a TrackInfo object for a Beatport Track object.
        """
        title = track.name
        if track.mix_name != u"Original Mix":
            title += u" ({0})".format(track.mix_name)
        artist, artist_id = self._get_artist(track.artists)
        length = track.length.total_seconds()
        return TrackInfo(title=title, track_id=track.beatport_id,
                         artist=artist, artist_id=artist_id,
                         length=length, index=track.track_number,
                         medium_index=track.track_number,
                         data_source=u'Beatport', data_url=track.url)

    def _get_artist(self, artists):
        """Returns an artist string (all artists) and an artist_id (the main
        artist) for a list of Beatport release or track artists.
        """
        artist_id = None
        bits = []
        for artist in artists:
            if not artist_id:
                artist_id = artist[0]
            name = artist[1]
            # Strip disambiguation number.
            name = re.sub(r' \(\d+\)$', '', name)
            # Move articles to the front.
            name = re.sub(r'^(.*?), (a|an|the)$', r'\2 \1', name, flags=re.I)
            bits.append(name)
        artist = ', '.join(bits).replace(' ,', ',') or None
        return artist, artist_id

    def _get_tracks(self, query):
        """Returns a list of TrackInfo objects for a Beatport query.
        """
        bp_tracks = self.client.search(query, release_type='track')
        tracks = [self._get_track_info(x) for x in bp_tracks]
        return tracks

View file

@ -35,17 +35,18 @@ from beets.util import bluelet
from beets.library import Item
from beets import dbcore
from beets.mediafile import MediaFile
import six
PROTOCOL_VERSION = '0.13.0'
BUFSIZE = 1024
HELLO = 'OK MPD %s' % PROTOCOL_VERSION
CLIST_BEGIN = 'command_list_begin'
CLIST_VERBOSE_BEGIN = 'command_list_ok_begin'
CLIST_END = 'command_list_end'
RESP_OK = 'OK'
RESP_CLIST_VERBOSE = 'list_OK'
RESP_ERR = 'ACK'
HELLO = u'OK MPD %s' % PROTOCOL_VERSION
CLIST_BEGIN = u'command_list_begin'
CLIST_VERBOSE_BEGIN = u'command_list_ok_begin'
CLIST_END = u'command_list_end'
RESP_OK = u'OK'
RESP_CLIST_VERBOSE = u'list_OK'
RESP_ERR = u'ACK'
NEWLINE = u"\n"
@ -305,12 +306,12 @@ class BaseServer(object):
playlist, playlistlength, and xfade.
"""
yield (
u'volume: ' + unicode(self.volume),
u'repeat: ' + unicode(int(self.repeat)),
u'random: ' + unicode(int(self.random)),
u'playlist: ' + unicode(self.playlist_version),
u'playlistlength: ' + unicode(len(self.playlist)),
u'xfade: ' + unicode(self.crossfade),
u'volume: ' + six.text_type(self.volume),
u'repeat: ' + six.text_type(int(self.repeat)),
u'random: ' + six.text_type(int(self.random)),
u'playlist: ' + six.text_type(self.playlist_version),
u'playlistlength: ' + six.text_type(len(self.playlist)),
u'xfade: ' + six.text_type(self.crossfade),
)
if self.current_index == -1:
@ -323,8 +324,8 @@ class BaseServer(object):
if self.current_index != -1: # i.e., paused or playing
current_id = self._item_id(self.playlist[self.current_index])
yield u'song: ' + unicode(self.current_index)
yield u'songid: ' + unicode(current_id)
yield u'song: ' + six.text_type(self.current_index)
yield u'songid: ' + six.text_type(current_id)
if self.error:
yield u'error: ' + self.error
@ -468,8 +469,8 @@ class BaseServer(object):
Also a dummy implementation.
"""
for idx, track in enumerate(self.playlist):
yield u'cpos: ' + unicode(idx)
yield u'Id: ' + unicode(track.id)
yield u'cpos: ' + six.text_type(idx)
yield u'Id: ' + six.text_type(track.id)
def cmd_currentsong(self, conn):
"""Sends information about the currently-playing song.
@ -569,12 +570,12 @@ class Connection(object):
added after every string. Returns a Bluelet event that sends
the data.
"""
if isinstance(lines, basestring):
if isinstance(lines, six.string_types):
lines = [lines]
out = NEWLINE.join(lines) + NEWLINE
log.debug('{}', out[:-1]) # Don't log trailing newline.
if isinstance(out, unicode):
out = out.encode('utf8')
if isinstance(out, six.text_type):
out = out.encode('utf-8')
return self.sock.sendall(out)
def do_command(self, command):
@ -603,7 +604,8 @@ class Connection(object):
line = line.strip()
if not line:
break
log.debug('{}', line)
line = line.decode('utf8') # MPD protocol uses UTF-8.
log.debug(u'{}', line)
if clist is not None:
# Command list already opened.
@ -639,8 +641,8 @@ class Command(object):
"""A command issued by the client for processing by the server.
"""
command_re = re.compile(br'^([^ \t]+)[ \t]*')
arg_re = re.compile(br'"((?:\\"|[^"])+)"|([^ \t"]+)')
command_re = re.compile(r'^([^ \t]+)[ \t]*')
arg_re = re.compile(r'"((?:\\"|[^"])+)"|([^ \t"]+)')
def __init__(self, s):
"""Creates a new `Command` from the given string, `s`, parsing
@ -655,11 +657,10 @@ class Command(object):
if match[0]:
# Quoted argument.
arg = match[0]
arg = arg.replace(b'\\"', b'"').replace(b'\\\\', b'\\')
arg = arg.replace(u'\\"', u'"').replace(u'\\\\', u'\\')
else:
# Unquoted argument.
arg = match[1]
arg = arg.decode('utf8')
self.args.append(arg)
def run(self, conn):
@ -771,28 +772,28 @@ class Server(BaseServer):
def _item_info(self, item):
info_lines = [
u'file: ' + item.destination(fragment=True),
u'Time: ' + unicode(int(item.length)),
u'Time: ' + six.text_type(int(item.length)),
u'Title: ' + item.title,
u'Artist: ' + item.artist,
u'Album: ' + item.album,
u'Genre: ' + item.genre,
]
track = unicode(item.track)
track = six.text_type(item.track)
if item.tracktotal:
track += u'/' + unicode(item.tracktotal)
track += u'/' + six.text_type(item.tracktotal)
info_lines.append(u'Track: ' + track)
info_lines.append(u'Date: ' + unicode(item.year))
info_lines.append(u'Date: ' + six.text_type(item.year))
try:
pos = self._id_to_index(item.id)
info_lines.append(u'Pos: ' + unicode(pos))
info_lines.append(u'Pos: ' + six.text_type(pos))
except ArgumentNotFoundError:
# Don't include position if not in playlist.
pass
info_lines.append(u'Id: ' + unicode(item.id))
info_lines.append(u'Id: ' + six.text_type(item.id))
return info_lines
@ -852,7 +853,7 @@ class Server(BaseServer):
for name, itemid in iter(sorted(node.files.items())):
item = self.lib.get_item(itemid)
yield self._item_info(item)
for name, _ in iter(sorted(node.dirs.iteritems())):
for name, _ in iter(sorted(node.dirs.items())):
dirpath = self._path_join(path, name)
if dirpath.startswith(u"/"):
# Strip leading slash (libmpc rejects this).
@ -872,12 +873,12 @@ class Server(BaseServer):
yield u'file: ' + basepath
else:
# List a directory. Recurse into both directories and files.
for name, itemid in sorted(node.files.iteritems()):
for name, itemid in sorted(node.files.items()):
newpath = self._path_join(basepath, name)
# "yield from"
for v in self._listall(newpath, itemid, info):
yield v
for name, subdir in sorted(node.dirs.iteritems()):
for name, subdir in sorted(node.dirs.items()):
newpath = self._path_join(basepath, name)
yield u'directory: ' + newpath
for v in self._listall(newpath, subdir, info):
@ -902,11 +903,11 @@ class Server(BaseServer):
yield self.lib.get_item(node)
else:
# Recurse into a directory.
for name, itemid in sorted(node.files.iteritems()):
for name, itemid in sorted(node.files.items()):
# "yield from"
for v in self._all_items(itemid):
yield v
for name, subdir in sorted(node.dirs.iteritems()):
for name, subdir in sorted(node.dirs.items()):
for v in self._all_items(subdir):
yield v
@ -917,7 +918,7 @@ class Server(BaseServer):
for item in self._all_items(self._resolve_path(path)):
self.playlist.append(item)
if send_id:
yield u'Id: ' + unicode(item.id)
yield u'Id: ' + six.text_type(item.id)
self.playlist_version += 1
def cmd_add(self, conn, path):
@ -938,11 +939,11 @@ class Server(BaseServer):
if self.current_index > -1:
item = self.playlist[self.current_index]
yield u'bitrate: ' + unicode(item.bitrate / 1000)
yield u'bitrate: ' + six.text_type(item.bitrate / 1000)
# Missing 'audio'.
(pos, total) = self.player.time()
yield u'time: ' + unicode(pos) + u':' + unicode(total)
yield u'time: ' + six.text_type(pos) + u':' + six.text_type(total)
# Also missing 'updating_db'.
@ -957,13 +958,13 @@ class Server(BaseServer):
artists, albums, songs, totaltime = tx.query(statement)[0]
yield (
u'artists: ' + unicode(artists),
u'albums: ' + unicode(albums),
u'songs: ' + unicode(songs),
u'uptime: ' + unicode(int(time.time() - self.startup_time)),
u'artists: ' + six.text_type(artists),
u'albums: ' + six.text_type(albums),
u'songs: ' + six.text_type(songs),
u'uptime: ' + six.text_type(int(time.time() - self.startup_time)),
u'playtime: ' + u'0', # Missing.
u'db_playtime: ' + unicode(int(totaltime)),
u'db_update: ' + unicode(int(self.updated_time)),
u'db_playtime: ' + six.text_type(int(totaltime)),
u'db_update: ' + six.text_type(int(self.updated_time)),
)
# Searching.
@ -1059,7 +1060,7 @@ class Server(BaseServer):
rows = tx.query(statement, subvals)
for row in rows:
yield show_tag_canon + u': ' + unicode(row[0])
yield show_tag_canon + u': ' + six.text_type(row[0])
def cmd_count(self, conn, tag, value):
"""Returns the number and total time of songs matching the
@ -1071,8 +1072,8 @@ class Server(BaseServer):
for item in self.lib.items(dbcore.query.MatchQuery(key, value)):
songs += 1
playtime += item.length
yield u'songs: ' + unicode(songs)
yield u'playtime: ' + unicode(int(playtime))
yield u'songs: ' + six.text_type(songs)
yield u'playtime: ' + six.text_type(int(playtime))
# "Outputs." Just a dummy implementation because we don't control
# any outputs.
@ -1167,7 +1168,7 @@ class BPDPlugin(BeetsPlugin):
server.run()
except NoGstreamerError:
global_log.error(u'Gstreamer Python bindings not found.')
global_log.error(u'Install "python-gst0.10", "py27-gst-python", '
global_log.error(u'Install "gstreamer1.0" and "python-gi"'
u'or similar package to use BPD.')
def commands(self):
@ -1180,11 +1181,12 @@ class BPDPlugin(BeetsPlugin):
)
def func(lib, opts, args):
host = args.pop(0) if args else self.config['host'].get(unicode)
host = self.config['host'].as_str()
host = args.pop(0) if args else host
port = args.pop(0) if args else self.config['port'].get(int)
if args:
raise beets.ui.UserError(u'too many arguments')
password = self.config['password'].get(unicode)
password = self.config['password'].as_str()
volume = self.config['volume'].get(int)
debug = opts.debug or False
self.start_bpd(lib, host, int(port), password, volume, debug)

View file

@ -19,17 +19,25 @@ music player.
from __future__ import division, absolute_import, print_function
import six
import sys
import time
import gobject
import thread
from six.moves import _thread
import os
import copy
import urllib
from six.moves import urllib
from beets import ui
import pygst
pygst.require('0.10')
import gst # noqa
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GLib, Gst # noqa: E402
Gst.init(None)
class QueryError(Exception):
pass
class GstPlayer(object):
@ -57,8 +65,19 @@ class GstPlayer(object):
# Set up the Gstreamer player. From the pygst tutorial:
# http://pygstdocs.berlios.de/pygst-tutorial/playbin.html
self.player = gst.element_factory_make("playbin2", "player")
fakesink = gst.element_factory_make("fakesink", "fakesink")
####
# Updated to GStreamer 1.0 with:
# https://wiki.ubuntu.com/Novacut/GStreamer1.0
self.player = Gst.ElementFactory.make("playbin", "player")
if self.player is None:
raise ui.UserError("Could not create playbin")
fakesink = Gst.ElementFactory.make("fakesink", "fakesink")
if fakesink is None:
raise ui.UserError("Could not create fakesink")
self.player.set_property("video-sink", fakesink)
bus = self.player.get_bus()
bus.add_signal_watch()
@ -74,21 +93,21 @@ class GstPlayer(object):
"""Returns the current state flag of the playbin."""
# gst's get_state function returns a 3-tuple; we just want the
# status flag in position 1.
return self.player.get_state()[1]
return self.player.get_state(Gst.CLOCK_TIME_NONE)[1]
def _handle_message(self, bus, message):
"""Callback for status updates from GStreamer."""
if message.type == gst.MESSAGE_EOS:
if message.type == Gst.MessageType.EOS:
# file finished playing
self.player.set_state(gst.STATE_NULL)
self.player.set_state(Gst.State.NULL)
self.playing = False
self.cached_time = None
if self.finished_callback:
self.finished_callback()
elif message.type == gst.MESSAGE_ERROR:
elif message.type == Gst.MessageType.ERROR:
# error
self.player.set_state(gst.STATE_NULL)
self.player.set_state(Gst.State.NULL)
err, debug = message.parse_error()
print(u"Error: {0}".format(err))
self.playing = False
@ -109,27 +128,27 @@ class GstPlayer(object):
"""Immediately begin playing the audio file at the given
path.
"""
self.player.set_state(gst.STATE_NULL)
if isinstance(path, unicode):
path = path.encode('utf8')
uri = 'file://' + urllib.quote(path)
self.player.set_state(Gst.State.NULL)
if isinstance(path, six.text_type):
path = path.encode('utf-8')
uri = 'file://' + urllib.parse.quote(path)
self.player.set_property("uri", uri)
self.player.set_state(gst.STATE_PLAYING)
self.player.set_state(Gst.State.PLAYING)
self.playing = True
def play(self):
"""If paused, resume playback."""
if self._get_state() == gst.STATE_PAUSED:
self.player.set_state(gst.STATE_PLAYING)
if self._get_state() == Gst.State.PAUSED:
self.player.set_state(Gst.State.PLAYING)
self.playing = True
def pause(self):
"""Pause playback."""
self.player.set_state(gst.STATE_PAUSED)
self.player.set_state(Gst.State.PAUSED)
def stop(self):
"""Halt playback."""
self.player.set_state(gst.STATE_NULL)
self.player.set_state(Gst.State.NULL)
self.playing = False
self.cached_time = None
@ -139,27 +158,36 @@ class GstPlayer(object):
Call this function before trying to play any music with
play_file() or play().
"""
# If we don't use the MainLoop, messages are never sent.
gobject.threads_init()
def start():
loop = gobject.MainLoop()
loop = GLib.MainLoop()
loop.run()
thread.start_new_thread(start, ())
_thread.start_new_thread(start, ())
def time(self):
"""Returns a tuple containing (position, length) where both
values are integers in seconds. If no stream is available,
returns (0, 0).
"""
fmt = gst.Format(gst.FORMAT_TIME)
fmt = Gst.Format(Gst.Format.TIME)
try:
pos = self.player.query_position(fmt, None)[0] / (10 ** 9)
length = self.player.query_duration(fmt, None)[0] / (10 ** 9)
posq = self.player.query_position(fmt)
if not posq[0]:
raise QueryError("query_position failed")
pos = posq[1] // (10 ** 9)
lengthq = self.player.query_duration(fmt)
if not lengthq[0]:
raise QueryError("query_duration failed")
length = lengthq[1] // (10 ** 9)
self.cached_time = (pos, length)
return (pos, length)
except gst.QueryError:
except QueryError:
# Stream not ready. For small gaps of time, for instance
# after seeking, the time values are unavailable. For this
# reason, we cache recent.
@ -175,9 +203,9 @@ class GstPlayer(object):
self.stop()
return
fmt = gst.Format(gst.FORMAT_TIME)
fmt = Gst.Format(Gst.Format.TIME)
ns = position * 10 ** 9 # convert to nanoseconds
self.player.seek_simple(fmt, gst.SEEK_FLAG_FLUSH, ns)
self.player.seek_simple(fmt, Gst.SeekFlags.FLUSH, ns)
# save new cached time
self.cached_time = (position, cur_len)
@ -208,12 +236,14 @@ def play_complicated(paths):
def next_song():
my_paths.pop(0)
p.play_file(my_paths[0])
p = GstPlayer(next_song)
p.run()
p.play_file(my_paths[0])
while my_paths:
time.sleep(1)
if __name__ == '__main__':
# A very simple command-line player. Just give it names of audio
# files on the command line; these are all played in sequence.

View file

@ -18,6 +18,7 @@
from __future__ import division, absolute_import, print_function
import time
from six.moves import input
from beets import ui
from beets.plugins import BeetsPlugin
@ -31,7 +32,7 @@ def bpm(max_strokes):
dt = []
for i in range(max_strokes):
# Press enter to the rhythm...
s = raw_input()
s = input()
if s == '':
t1 = time.time()
# Only start measuring at the second stroke
@ -64,7 +65,9 @@ class BPMPlugin(BeetsPlugin):
return [cmd]
def command(self, lib, opts, args):
self.get_bpm(lib.items(ui.decargs(args)))
items = lib.items(ui.decargs(args))
write = ui.should_write()
self.get_bpm(items, write)
def get_bpm(self, items, write=False):
overwrite = self.config['overwrite'].get(bool)

View file

@ -21,7 +21,8 @@ from __future__ import division, absolute_import, print_function
from datetime import datetime
import re
import string
from itertools import tee, izip
from six.moves import zip
from itertools import tee
from beets import plugins, ui
@ -37,7 +38,7 @@ def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return izip(a, b)
return zip(a, b)
def span_from_str(span_str):
@ -137,9 +138,10 @@ def str2fmt(s):
def format_span(fmt, yearfrom, yearto, fromnchars, tonchars):
"""Return a span string representation.
"""
args = (bytes(yearfrom)[-fromnchars:])
args = (str(yearfrom)[-fromnchars:])
if tonchars:
args = (bytes(yearfrom)[-fromnchars:], bytes(yearto)[-tonchars:])
args = (str(yearfrom)[-fromnchars:], str(yearto)[-tonchars:])
return fmt % args

View file

@ -121,7 +121,7 @@ def _all_releases(items):
for release_id in release_ids:
relcounts[release_id] += 1
for release_id, count in relcounts.iteritems():
for release_id, count in relcounts.items():
if float(count) / len(items) > COMMON_REL_THRESH:
yield release_id
@ -181,7 +181,7 @@ class AcoustidPlugin(plugins.BeetsPlugin):
def submit_cmd_func(lib, opts, args):
try:
apikey = config['acoustid']['apikey'].get(unicode)
apikey = config['acoustid']['apikey'].as_str()
except confit.NotFoundError:
raise ui.UserError(u'no Acoustid user API key provided')
submit_items(self._log, apikey, lib.items(ui.decargs(args)))
@ -236,7 +236,7 @@ def submit_items(log, userkey, items, chunksize=64):
try:
acoustid.submit(API_KEY, userkey, data)
except acoustid.AcoustidError as exc:
log.warn(u'acoustid submission error: {0}', exc)
log.warning(u'acoustid submission error: {0}', exc)
del data[:]
for item in items:
@ -295,7 +295,7 @@ def fingerprint_item(log, item, write=False):
log.info(u'{0}: fingerprinting',
util.displayable_path(item.path))
try:
_, fp = acoustid.fingerprint_file(item.path)
_, fp = acoustid.fingerprint_file(util.syspath(item.path))
item.acoustid_fingerprint = fp
if write:
log.info(u'{0}: writing fingerprint',

View file

@ -22,13 +22,17 @@ import threading
import subprocess
import tempfile
import shlex
import six
from string import Template
import platform
from beets import ui, util, plugins, config
from beets.plugins import BeetsPlugin
from beets.util.confit import ConfigTypeError
from beets import art
from beets.util.artresizer import ArtResizer
from beets.library import parse_query_string
from beets.library import Item
_fs_lock = threading.Lock()
_temp_files = [] # Keep track of temporary transcoded files for deletion.
@ -47,14 +51,15 @@ def replace_ext(path, ext):
The new extension must not contain a leading dot.
"""
return os.path.splitext(path)[0] + b'.' + ext
ext_dot = b'.' + ext
return os.path.splitext(path)[0] + ext_dot
def get_format(fmt=None):
"""Return the command template and the extension from the config.
"""
if not fmt:
fmt = config['convert']['format'].get(unicode).lower()
fmt = config['convert']['format'].as_str().lower()
fmt = ALIASES.get(fmt, fmt)
try:
@ -67,28 +72,34 @@ def get_format(fmt=None):
.format(fmt)
)
except ConfigTypeError:
command = config['convert']['formats'][fmt].get(bytes)
command = config['convert']['formats'][fmt].get(str)
extension = fmt
# Convenience and backwards-compatibility shortcuts.
keys = config['convert'].keys()
if 'command' in keys:
command = config['convert']['command'].get(unicode)
command = config['convert']['command'].as_str()
elif 'opts' in keys:
# Undocumented option for backwards compatibility with < 1.3.1.
command = u'ffmpeg -i $source -y {0} $dest'.format(
config['convert']['opts'].get(unicode)
config['convert']['opts'].as_str()
)
if 'extension' in keys:
extension = config['convert']['extension'].get(unicode)
extension = config['convert']['extension'].as_str()
return (command.encode('utf8'), extension.encode('utf8'))
return (command.encode('utf-8'), extension.encode('utf-8'))
def should_transcode(item, fmt):
"""Determine whether the item should be transcoded as part of
conversion (i.e., its bitrate is high or it has the wrong format).
"""
no_convert_queries = config['convert']['no_convert'].as_str_seq()
if no_convert_queries:
for query_string in no_convert_queries:
query, _ = parse_query_string(query_string, Item)
if query.match(item):
return False
if config['convert']['never_convert_lossy_files'] and \
not (item.format.lower() in LOSSLESS_FORMATS):
return False
@ -107,8 +118,8 @@ class ConvertPlugin(BeetsPlugin):
u'format': u'mp3',
u'formats': {
u'aac': {
u'command': u'ffmpeg -i $source -y -vn -acodec libfaac '
u'-aq 100 $dest',
u'command': u'ffmpeg -i $source -y -vn -acodec aac '
u'-aq 1 $dest',
u'extension': u'm4a',
},
u'alac': {
@ -130,11 +141,12 @@ class ConvertPlugin(BeetsPlugin):
u'quiet': False,
u'embed': True,
u'paths': {},
u'no_convert': u'',
u'never_convert_lossy_files': False,
u'copy_album_art': False,
u'album_art_maxwidth': 0,
})
self.import_stages = [self.auto_convert]
self.early_import_stages = [self.auto_convert]
self.register_listener('import_task_files', self._cleanup)
@ -181,27 +193,48 @@ class ConvertPlugin(BeetsPlugin):
if not quiet and not pretend:
self._log.info(u'Encoding {0}', util.displayable_path(source))
# On Python 3, we need to construct the command to invoke as a
# Unicode string. On Unix, this is a little unfortunate---the OS is
# expecting bytes---so we use surrogate escaping and decode with the
# argument encoding, which is the same encoding that will then be
# *reversed* to recover the same bytes before invoking the OS. On
# Windows, we want to preserve the Unicode filename "as is."
if not six.PY2:
command = command.decode(util.arg_encoding(), 'surrogateescape')
if platform.system() == 'Windows':
source = source.decode(util._fsencoding())
dest = dest.decode(util._fsencoding())
else:
source = source.decode(util.arg_encoding(), 'surrogateescape')
dest = dest.decode(util.arg_encoding(), 'surrogateescape')
# Substitute $source and $dest in the argument list.
args = shlex.split(command)
encode_cmd = []
for i, arg in enumerate(args):
args[i] = Template(arg).safe_substitute({
'source': source,
'dest': dest,
})
if six.PY2:
encode_cmd.append(args[i])
else:
encode_cmd.append(args[i].encode(util.arg_encoding()))
if pretend:
self._log.info(u' '.join(ui.decargs(args)))
self._log.info(u'{0}', u' '.join(ui.decargs(args)))
return
try:
util.command_output(args)
util.command_output(encode_cmd)
except subprocess.CalledProcessError as exc:
# Something went wrong (probably Ctrl+C), remove temporary files
self._log.info(u'Encoding {0} failed. Cleaning up...',
util.displayable_path(source))
self._log.debug(u'Command {0} exited with status {1}',
exc.cmd.decode('utf8', 'ignore'),
exc.returncode)
self._log.debug(u'Command {0} exited with status {1}: {2}',
args,
exc.returncode,
exc.output)
util.remove(dest)
util.prune_dirs(os.path.dirname(dest))
raise
@ -218,6 +251,9 @@ class ConvertPlugin(BeetsPlugin):
def convert_item(self, dest_dir, keep_new, path_formats, fmt,
pretend=False):
"""A pipeline thread that converts `Item` objects from a
library.
"""
command, ext = get_format(fmt)
item, original, converted = None, None, None
while True:
@ -369,61 +405,66 @@ class ConvertPlugin(BeetsPlugin):
util.copy(album.artpath, dest)
def convert_func(self, lib, opts, args):
if not opts.dest:
opts.dest = self.config['dest'].get()
if not opts.dest:
dest = opts.dest or self.config['dest'].get()
if not dest:
raise ui.UserError(u'no convert destination set')
opts.dest = util.bytestring_path(opts.dest)
dest = util.bytestring_path(dest)
if not opts.threads:
opts.threads = self.config['threads'].get(int)
threads = opts.threads or self.config['threads'].get(int)
if self.config['paths']:
path_formats = ui.get_path_formats(self.config['paths'])
path_formats = ui.get_path_formats(self.config['paths'] or None)
fmt = opts.format or self.config['format'].as_str().lower()
if opts.pretend is not None:
pretend = opts.pretend
else:
path_formats = ui.get_path_formats()
if not opts.format:
opts.format = self.config['format'].get(unicode).lower()
pretend = opts.pretend if opts.pretend is not None else \
self.config['pretend'].get(bool)
if not pretend:
ui.commands.list_items(lib, ui.decargs(args), opts.album)
if not (opts.yes or ui.input_yn(u"Convert? (Y/n)")):
return
pretend = self.config['pretend'].get(bool)
if opts.album:
albums = lib.albums(ui.decargs(args))
items = (i for a in albums for i in a.items())
if self.config['copy_album_art']:
for album in albums:
self.copy_album_art(album, opts.dest, path_formats,
pretend)
items = [i for a in albums for i in a.items()]
if not pretend:
for a in albums:
ui.print_(format(a, u''))
else:
items = iter(lib.items(ui.decargs(args)))
convert = [self.convert_item(opts.dest,
items = list(lib.items(ui.decargs(args)))
if not pretend:
for i in items:
ui.print_(format(i, u''))
if not items:
self._log.error(u'Empty query result.')
return
if not (pretend or opts.yes or ui.input_yn(u"Convert? (Y/n)")):
return
if opts.album and self.config['copy_album_art']:
for album in albums:
self.copy_album_art(album, dest, path_formats, pretend)
convert = [self.convert_item(dest,
opts.keep_new,
path_formats,
opts.format,
fmt,
pretend)
for _ in range(opts.threads)]
pipe = util.pipeline.Pipeline([items, convert])
for _ in range(threads)]
pipe = util.pipeline.Pipeline([iter(items), convert])
pipe.run_parallel()
def convert_on_import(self, lib, item):
"""Transcode a file automatically after it is imported into the
library.
"""
fmt = self.config['format'].get(unicode).lower()
fmt = self.config['format'].as_str().lower()
if should_transcode(item, fmt):
command, ext = get_format()
# Create a temporary file for the conversion.
tmpdir = self.config['tmpdir'].get()
fd, dest = tempfile.mkstemp('.' + ext, dir=tmpdir)
if tmpdir:
tmpdir = util.py3_path(util.bytestring_path(tmpdir))
fd, dest = tempfile.mkstemp(util.py3_path(b'.' + ext), dir=tmpdir)
os.close(fd)
dest = util.bytestring_path(dest)
_temp_files.append(dest) # Delete the transcode later.

View file

@ -35,7 +35,7 @@ class CuePlugin(BeetsPlugin):
return
if len(cues) > 1:
self._log.info(u"Found multiple cue files doing nothing: {0}",
map(displayable_path, cues))
list(map(displayable_path, cues)))
cue_file = cues[0]
self._log.info("Found {} for {}", displayable_path(cue_file), item)

View file

@ -19,31 +19,28 @@ discogs-client library.
from __future__ import division, absolute_import, print_function
import beets.ui
from beets import logging
from beets import config
from beets.autotag.hooks import AlbumInfo, TrackInfo, Distance
from beets.plugins import BeetsPlugin
from beets.util import confit
from discogs_client import Release, Client
from discogs_client import Release, Master, Client
from discogs_client.exceptions import DiscogsAPIError
from requests.exceptions import ConnectionError
from six.moves import http_client
import beets
import re
import time
import json
import socket
import httplib
import os
import traceback
from string import ascii_lowercase
# Silence spurious INFO log lines generated by urllib3.
urllib3_logger = logging.getLogger('requests.packages.urllib3')
urllib3_logger.setLevel(logging.CRITICAL)
USER_AGENT = u'beets/{0} +http://beets.io/'.format(beets.__version__)
# Exceptions that discogs_client should really handle but does not.
CONNECTION_ERRORS = (ConnectionError, socket.error, httplib.HTTPException,
CONNECTION_ERRORS = (ConnectionError, socket.error, http_client.HTTPException,
ValueError, # JSON decoding raises a ValueError.
DiscogsAPIError)
@ -57,17 +54,25 @@ class DiscogsPlugin(BeetsPlugin):
'apisecret': 'plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy',
'tokenfile': 'discogs_token.json',
'source_weight': 0.5,
'user_token': '',
})
self.config['apikey'].redact = True
self.config['apisecret'].redact = True
self.config['user_token'].redact = True
self.discogs_client = None
self.register_listener('import_begin', self.setup)
def setup(self, session=None):
"""Create the `discogs_client` field. Authenticate if necessary.
"""
c_key = self.config['apikey'].get(unicode)
c_secret = self.config['apisecret'].get(unicode)
c_key = self.config['apikey'].as_str()
c_secret = self.config['apisecret'].as_str()
# Try using a configured user token (bypassing OAuth login).
user_token = self.config['user_token'].as_str()
if user_token:
self.discogs_client = Client(USER_AGENT, user_token=user_token)
return
# Get the OAuth token from a file or log in.
try:
@ -84,7 +89,7 @@ class DiscogsPlugin(BeetsPlugin):
token, secret)
def reset_auth(self):
"""Delete toke file & redo the auth steps.
"""Delete token file & redo the auth steps.
"""
os.remove(self._tokenfile())
self.setup()
@ -194,13 +199,13 @@ class DiscogsPlugin(BeetsPlugin):
# cause a query to return no results, even if they match the artist or
# album title. Use `re.UNICODE` flag to avoid stripping non-english
# word characters.
# TEMPORARY: Encode as ASCII to work around a bug:
# FIXME: Encode as ASCII to work around a bug:
# https://github.com/beetbox/beets/issues/1051
# When the library is fixed, we should encode as UTF-8.
query = re.sub(r'(?u)\W+', ' ', query).encode('ascii', "replace")
# Strip medium information from query, Things like "CD1" and "disk 1"
# can also negate an otherwise positive result.
query = re.sub(r'(?i)\b(CD|disc)\s*\d+', '', query)
query = re.sub(br'(?i)\b(CD|disc)\s*\d+', b'', query)
try:
releases = self.discogs_client.search(query,
type='release').page(1)
@ -208,11 +213,48 @@ class DiscogsPlugin(BeetsPlugin):
self._log.debug(u"Communication error while searching for {0!r}",
query, exc_info=True)
return []
return [self.get_album_info(release) for release in releases[:5]]
return [album for album in map(self.get_album_info, releases[:5])
if album]
def get_master_year(self, master_id):
"""Fetches a master release given its Discogs ID and returns its year
or None if the master release is not found.
"""
self._log.debug(u'Searching for master release {0}', master_id)
result = Master(self.discogs_client, {'id': master_id})
try:
year = result.fetch('year')
return year
except DiscogsAPIError as e:
if e.status_code != 404:
self._log.debug(u'API Error: {0} (query: {1})', e, result._uri)
if e.status_code == 401:
self.reset_auth()
return self.get_master_year(master_id)
return None
except CONNECTION_ERRORS:
self._log.debug(u'Connection error in master release lookup',
exc_info=True)
return None
def get_album_info(self, result):
"""Returns an AlbumInfo object for a discogs Release object.
"""
# Explicitly reload the `Release` fields, as they might not be yet
# present if the result is from a `discogs_client.search()`.
if not result.data.get('artists'):
result.refresh()
# Sanity check for required fields. The list of required fields is
# defined at Guideline 1.3.1.a, but in practice some releases might be
# lacking some of these fields. This function expects at least:
# `artists` (>0), `title`, `id`, `tracklist` (>0)
# https://www.discogs.com/help/doc/submission-guidelines-general-rules
if not all([result.data.get(k) for k in ['artists', 'title', 'id',
'tracklist']]):
self._log.warn(u"Release does not contain the required fields")
return None
artist, artist_id = self.get_artist([a.data for a in result.artists])
album = re.sub(r' +', ' ', result.title)
album_id = result.data['id']
@ -221,28 +263,53 @@ class DiscogsPlugin(BeetsPlugin):
# information and leave us with skeleton `Artist` objects that will
# each make an API call just to get the same data back.
tracks = self.get_tracks(result.data['tracklist'])
# Extract information for the optional AlbumInfo fields, if possible.
va = result.data['artists'][0].get('name', '').lower() == 'various'
year = result.data.get('year')
mediums = [t.medium for t in tracks]
country = result.data.get('country')
data_url = result.data.get('uri')
# Extract information for the optional AlbumInfo fields that are
# contained on nested discogs fields.
albumtype = media = label = catalogno = None
if result.data.get('formats'):
albumtype = ', '.join(
result.data['formats'][0].get('descriptions', [])) or None
va = result.data['artists'][0]['name'].lower() == 'various'
media = result.data['formats'][0]['name']
if result.data.get('labels'):
label = result.data['labels'][0].get('name')
catalogno = result.data['labels'][0].get('catno')
# Additional cleanups (various artists name, catalog number, media).
if va:
artist = config['va_name'].get(unicode)
year = result.data['year']
label = result.data['labels'][0]['name']
mediums = len(set(t.medium for t in tracks))
catalogno = result.data['labels'][0]['catno']
artist = config['va_name'].as_str()
if catalogno == 'none':
catalogno = None
country = result.data.get('country')
media = result.data['formats'][0]['name']
data_url = result.data['uri']
# Explicitly set the `media` for the tracks, since it is expected by
# `autotag.apply_metadata`, and set `medium_total`.
for track in tracks:
track.media = media
track.medium_total = mediums.count(track.medium)
# Discogs does not have track IDs. Invent our own IDs as proposed
# in #2336.
track.track_id = str(album_id) + "-" + track.track_alt
# Retrieve master release id (returns None if there isn't one).
master_id = result.data.get('master_id')
# Assume `original_year` is equal to `year` for releases without
# a master release, otherwise fetch the master release.
original_year = self.get_master_year(master_id) if master_id else year
return AlbumInfo(album, album_id, artist, artist_id, tracks, asin=None,
albumtype=albumtype, va=va, year=year, month=None,
day=None, label=label, mediums=mediums,
artist_sort=None, releasegroup_id=None,
day=None, label=label, mediums=len(set(mediums)),
artist_sort=None, releasegroup_id=master_id,
catalognum=catalogno, script=None, language=None,
country=country, albumstatus=None, media=media,
albumdisambig=None, artist_credit=None,
original_year=None, original_month=None,
original_year=original_year, original_month=None,
original_day=None, data_source='Discogs',
data_url=data_url)
@ -269,38 +336,71 @@ class DiscogsPlugin(BeetsPlugin):
def get_tracks(self, tracklist):
"""Returns a list of TrackInfo objects for a discogs tracklist.
"""
try:
clean_tracklist = self.coalesce_tracks(tracklist)
except Exception as exc:
# FIXME: this is an extra precaution for making sure there are no
# side effects after #2222. It should be removed after further
# testing.
self._log.debug(u'{}', traceback.format_exc())
self._log.error(u'uncaught exception in coalesce_tracks: {}', exc)
clean_tracklist = tracklist
tracks = []
index_tracks = {}
index = 0
for track in tracklist:
for track in clean_tracklist:
# Only real tracks have `position`. Otherwise, it's an index track.
if track['position']:
index += 1
tracks.append(self.get_track_info(track, index))
track_info = self.get_track_info(track, index)
track_info.track_alt = track['position']
tracks.append(track_info)
else:
index_tracks[index + 1] = track['title']
# Fix up medium and medium_index for each track. Discogs position is
# unreliable, but tracks are in order.
medium = None
medium_count, index_count = 0, 0
medium_count, index_count, side_count = 0, 0, 0
sides_per_medium = 1
# If a medium has two sides (ie. vinyl or cassette), each pair of
# consecutive sides should belong to the same medium.
if all([track.medium is not None for track in tracks]):
m = sorted(set([track.medium.lower() for track in tracks]))
# If all track.medium are single consecutive letters, assume it is
# a 2-sided medium.
if ''.join(m) in ascii_lowercase:
sides_per_medium = 2
for track in tracks:
# Handle special case where a different medium does not indicate a
# new disc, when there is no medium_index and the ordinal of medium
# is not sequential. For example, I, II, III, IV, V. Assume these
# are the track index, not the medium.
# side_count is the number of mediums or medium sides (in the case
# of two-sided mediums) that were seen before.
medium_is_index = track.medium and not track.medium_index and (
len(track.medium) != 1 or
ord(track.medium) - 64 != medium_count + 1
# Not within standard incremental medium values (A, B, C, ...).
ord(track.medium) - 64 != side_count + 1
)
if not medium_is_index and medium != track.medium:
# Increment medium_count and reset index_count when medium
# changes.
medium = track.medium
side_count += 1
if sides_per_medium == 2:
if side_count % sides_per_medium:
# Two-sided medium changed. Reset index_count.
index_count = 0
medium_count += 1
else:
# Medium changed. Reset index_count.
medium_count += 1
index_count = 0
medium = track.medium
index_count += 1
medium_count = 1 if medium_count == 0 else medium_count
track.medium, track.medium_index = medium_count, index_count
# Get `disctitle` from Discogs index tracks. Assume that an index track
@ -315,30 +415,122 @@ class DiscogsPlugin(BeetsPlugin):
return tracks
def coalesce_tracks(self, raw_tracklist):
"""Pre-process a tracklist, merging subtracks into a single track. The
title for the merged track is the one from the previous index track,
if present; otherwise it is a combination of the subtracks titles.
"""
def add_merged_subtracks(tracklist, subtracks):
"""Modify `tracklist` in place, merging a list of `subtracks` into
a single track into `tracklist`."""
# Calculate position based on first subtrack, without subindex.
idx, medium_idx, sub_idx = \
self.get_track_index(subtracks[0]['position'])
position = '%s%s' % (idx or '', medium_idx or '')
if tracklist and not tracklist[-1]['position']:
# Assume the previous index track contains the track title.
if sub_idx:
# "Convert" the track title to a real track, discarding the
# subtracks assuming they are logical divisions of a
# physical track (12.2.9 Subtracks).
tracklist[-1]['position'] = position
else:
# Promote the subtracks to real tracks, discarding the
# index track, assuming the subtracks are physical tracks.
index_track = tracklist.pop()
# Fix artists when they are specified on the index track.
if index_track.get('artists'):
for subtrack in subtracks:
if not subtrack.get('artists'):
subtrack['artists'] = index_track['artists']
tracklist.extend(subtracks)
else:
# Merge the subtracks, pick a title, and append the new track.
track = subtracks[0].copy()
track['title'] = ' / '.join([t['title'] for t in subtracks])
tracklist.append(track)
# Pre-process the tracklist, trying to identify subtracks.
subtracks = []
tracklist = []
prev_subindex = ''
for track in raw_tracklist:
# Regular subtrack (track with subindex).
if track['position']:
_, _, subindex = self.get_track_index(track['position'])
if subindex:
if subindex.rjust(len(raw_tracklist)) > prev_subindex:
# Subtrack still part of the current main track.
subtracks.append(track)
else:
# Subtrack part of a new group (..., 1.3, *2.1*, ...).
add_merged_subtracks(tracklist, subtracks)
subtracks = [track]
prev_subindex = subindex.rjust(len(raw_tracklist))
continue
# Index track with nested sub_tracks.
if not track['position'] and 'sub_tracks' in track:
# Append the index track, assuming it contains the track title.
tracklist.append(track)
add_merged_subtracks(tracklist, track['sub_tracks'])
continue
# Regular track or index track without nested sub_tracks.
if subtracks:
add_merged_subtracks(tracklist, subtracks)
subtracks = []
prev_subindex = ''
tracklist.append(track)
# Merge and add the remaining subtracks, if any.
if subtracks:
add_merged_subtracks(tracklist, subtracks)
return tracklist
def get_track_info(self, track, index):
"""Returns a TrackInfo object for a discogs track.
"""
title = track['title']
track_id = None
medium, medium_index = self.get_track_index(track['position'])
medium, medium_index, _ = self.get_track_index(track['position'])
artist, artist_id = self.get_artist(track.get('artists', []))
length = self.get_track_length(track['duration'])
return TrackInfo(title, track_id, artist, artist_id, length, index,
medium, medium_index, artist_sort=None,
disctitle=None, artist_credit=None)
return TrackInfo(title, track_id, artist=artist, artist_id=artist_id,
length=length, index=index,
medium=medium, medium_index=medium_index,
artist_sort=None, disctitle=None, artist_credit=None)
def get_track_index(self, position):
"""Returns the medium and medium index for a discogs track position.
"""
# medium_index is a number at the end of position. medium is everything
# else. E.g. (A)(1), (Side A, Track )(1), (A)(), ()(1), etc.
match = re.match(r'^(.*?)(\d*)$', position.upper())
"""Returns the medium, medium index and subtrack index for a discogs
track position."""
# Match the standard Discogs positions (12.2.9), which can have several
# forms (1, 1-1, A1, A1.1, A1a, ...).
match = re.match(
r'^(.*?)' # medium: everything before medium_index.
r'(\d*?)' # medium_index: a number at the end of
# `position`, except if followed by a subtrack
# index.
# subtrack_index: can only be matched if medium
# or medium_index have been matched, and can be
r'((?<=\w)\.[\w]+' # - a dot followed by a string (A.1, 2.A)
r'|(?<=\d)[A-Z]+' # - a string that follows a number (1A, B2a)
r')?'
r'$',
position.upper()
)
if match:
medium, index = match.groups()
medium, index, subindex = match.groups()
if subindex and subindex.startswith('.'):
subindex = subindex[1:]
else:
self._log.debug(u'Invalid position: {0}', position)
medium = index = None
return medium or None, index or None
medium = index = subindex = None
return medium or None, index or None, subindex or None
def get_track_length(self, duration):
"""Returns the track length in seconds for a discogs duration.

View file

@ -20,9 +20,11 @@ from __future__ import division, absolute_import, print_function
import shlex
from beets.plugins import BeetsPlugin
from beets.ui import decargs, print_, vararg_callback, Subcommand, UserError
from beets.util import command_output, displayable_path, subprocess
from beets.ui import decargs, print_, Subcommand, UserError
from beets.util import command_output, displayable_path, subprocess, \
bytestring_path, MoveOperation
from beets.library import Item, Album
import six
PLUGIN = 'duplicates'
@ -79,10 +81,9 @@ class DuplicatesPlugin(BeetsPlugin):
help=u'report duplicates only if all attributes are set',
)
self._command.parser.add_option(
u'-k', u'--keys', dest='keys',
action='callback', metavar='KEY1 KEY2',
callback=vararg_callback,
help=u'report duplicates based on keys',
u'-k', u'--key', dest='keys',
action='append', metavar='KEY',
help=u'report duplicates based on keys (use multiple times)',
)
self._command.parser.add_option(
u'-M', u'--merge', dest='merge',
@ -112,14 +113,14 @@ class DuplicatesPlugin(BeetsPlugin):
self.config.set_args(opts)
album = self.config['album'].get(bool)
checksum = self.config['checksum'].get(str)
copy = self.config['copy'].get(str)
copy = bytestring_path(self.config['copy'].as_str())
count = self.config['count'].get(bool)
delete = self.config['delete'].get(bool)
fmt = self.config['format'].get(str)
full = self.config['full'].get(bool)
keys = self.config['keys'].get(list)
keys = self.config['keys'].as_str_seq()
merge = self.config['merge'].get(bool)
move = self.config['move'].get(str)
move = bytestring_path(self.config['move'].as_str())
path = self.config['path'].get(bool)
tiebreak = self.config['tiebreak'].get(dict)
strict = self.config['strict'].get(bool)
@ -135,15 +136,15 @@ class DuplicatesPlugin(BeetsPlugin):
items = lib.items(decargs(args))
if path:
fmt = '$path'
fmt = u'$path'
# Default format string for count mode.
if count and not fmt:
if album:
fmt = '$albumartist - $album'
fmt = u'$albumartist - $album'
else:
fmt = '$albumartist - $album - $title'
fmt += ': {0}'
fmt = u'$albumartist - $album - $title'
fmt += u': {0}'
if checksum:
for i in items:
@ -169,22 +170,22 @@ class DuplicatesPlugin(BeetsPlugin):
return [self._command]
def _process_item(self, item, copy=False, move=False, delete=False,
tag=False, fmt=''):
tag=False, fmt=u''):
"""Process Item `item`.
"""
print_(format(item, fmt))
if copy:
item.move(basedir=copy, copy=True)
item.move(basedir=copy, operation=MoveOperation.COPY)
item.store()
if move:
item.move(basedir=move, copy=False)
item.move(basedir=move)
item.store()
if delete:
item.remove(delete=True)
if tag:
try:
k, v = tag.split('=')
except:
except Exception:
raise UserError(
u"{}: can't parse k=v tag: {}".format(PLUGIN, tag)
)
@ -252,20 +253,19 @@ class DuplicatesPlugin(BeetsPlugin):
"completeness" (objects with more non-null fields come first)
and Albums are ordered by their track count.
"""
if tiebreak:
kind = 'items' if all(isinstance(o, Item)
for o in objs) else 'albums'
kind = 'items' if all(isinstance(o, Item) for o in objs) else 'albums'
if tiebreak and kind in tiebreak.keys():
key = lambda x: tuple(getattr(x, k) for k in tiebreak[kind])
else:
kind = Item if all(isinstance(o, Item) for o in objs) else Album
if kind is Item:
if kind == 'items':
def truthy(v):
# Avoid a Unicode warning by avoiding comparison
# between a bytes object and the empty Unicode
# string ''.
return v is not None and \
(v != '' if isinstance(v, unicode) else True)
fields = kind.all_keys()
(v != '' if isinstance(v, six.text_type) else True)
fields = Item.all_keys()
key = lambda x: sum(1 for f in fields if truthy(getattr(x, f)))
else:
key = lambda x: len(x.items())
@ -311,7 +311,7 @@ class DuplicatesPlugin(BeetsPlugin):
objs[0],
displayable_path(o.path),
displayable_path(missing.destination()))
missing.move(copy=True)
missing.move(operation=MoveOperation.COPY)
return objs
def _merge(self, objs):
@ -329,7 +329,7 @@ class DuplicatesPlugin(BeetsPlugin):
"""Generate triples of keys, duplicate counts, and constituent objects.
"""
offset = 0 if full else 1
for k, objs in self._group_by(objs, keys, strict).iteritems():
for k, objs in self._group_by(objs, keys, strict).items():
if len(objs) > 1:
objs = self._order(objs, tiebreak)
if merge:

View file

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016
#
@ -22,11 +23,12 @@ from beets import ui
from beets.dbcore import types
from beets.importer import action
from beets.ui.commands import _do_query, PromptChoice
from copy import deepcopy
import codecs
import subprocess
import yaml
from tempfile import NamedTemporaryFile
import os
import six
# These "safe" types can avoid the format/parse cycle that most fields go
@ -82,7 +84,7 @@ def load(s):
# Convert all keys to strings. They started out as strings,
# but the user may have inadvertently messed this up.
out.append({unicode(k): v for k, v in d.items()})
out.append({six.text_type(k): v for k, v in d.items()})
except yaml.YAMLError as e:
raise ParseError(u'invalid YAML: {}'.format(e))
@ -141,7 +143,7 @@ def apply_(obj, data):
else:
# Either the field was stringified originally or the user changed
# it from a safe type to an unsafe one. Parse it as a string.
obj.set_parse(key, unicode(value))
obj.set_parse(key, six.text_type(value))
class EditPlugin(plugins.BeetsPlugin):
@ -242,9 +244,15 @@ class EditPlugin(plugins.BeetsPlugin):
old_data = [flatten(o, fields) for o in objs]
# Set up a temporary file with the initial data for editing.
new = NamedTemporaryFile(suffix='.yaml', delete=False)
if six.PY2:
new = NamedTemporaryFile(mode='w', suffix='.yaml', delete=False)
else:
new = NamedTemporaryFile(mode='w', suffix='.yaml', delete=False,
encoding='utf-8')
old_str = dump(old_data)
new.write(old_str)
if six.PY2:
old_str = old_str.decode('utf-8')
new.close()
# Loop until we have parseable data and the user confirms.
@ -255,7 +263,7 @@ class EditPlugin(plugins.BeetsPlugin):
# Read the data back after editing and check whether anything
# changed.
with open(new.name) as f:
with codecs.open(new.name, encoding='utf-8') as f:
new_str = f.read()
if new_str == old_str:
ui.print_(u"No changes; aborting.")
@ -274,7 +282,7 @@ class EditPlugin(plugins.BeetsPlugin):
# Show the changes.
# If the objects are not on the DB yet, we need a copy of their
# original state for show_model_changes.
objs_old = [deepcopy(obj) if not obj._db else None
objs_old = [obj.copy() if obj.id < 0 else None
for obj in objs]
self.apply_data(objs, old_data, new_data)
changed = False
@ -293,9 +301,13 @@ class EditPlugin(plugins.BeetsPlugin):
elif choice == u'c': # Cancel.
return False
elif choice == u'e': # Keep editing.
# Reset the temporary changes to the objects.
# Reset the temporary changes to the objects. If we have a
# copy from above, use that, else reload from the database.
objs = [(old_obj or obj)
for old_obj, obj in zip(objs_old, objs)]
for obj in objs:
obj.read()
if not obj.id < 0:
obj.load()
continue
# Remove the temporary file before returning.
@ -310,7 +322,7 @@ class EditPlugin(plugins.BeetsPlugin):
are temporary.
"""
if len(old_data) != len(new_data):
self._log.warn(u'number of objects changed from {} to {}',
self._log.warning(u'number of objects changed from {} to {}',
len(old_data), len(new_data))
obj_by_id = {o.id: o for o in objs}
@ -321,7 +333,7 @@ class EditPlugin(plugins.BeetsPlugin):
forbidden = False
for key in ignore_fields:
if old_dict.get(key) != new_dict.get(key):
self._log.warn(u'ignoring object whose {} changed', key)
self._log.warning(u'ignoring object whose {} changed', key)
forbidden = True
break
if forbidden:
@ -356,9 +368,13 @@ class EditPlugin(plugins.BeetsPlugin):
"""Callback for invoking the functionality during an interactive
import session on the *original* item tags.
"""
# Assign temporary ids to the Items.
for i, obj in enumerate(task.items):
obj.id = i + 1
# Assign negative temporary ids to Items that are not in the database
# yet. By using negative values, no clash with items in the database
# can occur.
for i, obj in enumerate(task.items, start=1):
# The importer may set the id to None when re-importing albums.
if not obj._db or obj.id is None:
obj.id = -i
# Present the YAML to the user and let her change it.
fields = self._get_fields(album=False, extra=[])
@ -366,6 +382,7 @@ class EditPlugin(plugins.BeetsPlugin):
# Remove temporary ids.
for obj in task.items:
if obj.id < 0:
obj.id = None
# Save the new data.

View file

@ -20,13 +20,35 @@ import os.path
from beets.plugins import BeetsPlugin
from beets import ui
from beets.ui import decargs
from beets.ui import print_, decargs
from beets.util import syspath, normpath, displayable_path, bytestring_path
from beets.util.artresizer import ArtResizer
from beets import config
from beets import art
def _confirm(objs, album):
"""Show the list of affected objects (items or albums) and confirm
that the user wants to modify their artwork.
`album` is a Boolean indicating whether these are albums (as opposed
to items).
"""
noun = u'album' if album else u'file'
prompt = u'Modify artwork for {} {}{} (Y/n)?'.format(
len(objs),
noun,
u's' if len(objs) > 1 else u''
)
# Show all the items or albums.
for obj in objs:
print_(format(obj))
# Confirm with user.
return ui.input_yn(prompt)
class EmbedCoverArtPlugin(BeetsPlugin):
"""Allows albumart to be embedded into the actual files.
"""
@ -60,6 +82,9 @@ class EmbedCoverArtPlugin(BeetsPlugin):
embed_cmd.parser.add_option(
u'-f', u'--file', metavar='PATH', help=u'the image file to embed'
)
embed_cmd.parser.add_option(
u"-y", u"--yes", action="store_true", help=u"skip confirmation"
)
maxwidth = self.config['maxwidth'].get(int)
compare_threshold = self.config['compare_threshold'].get(int)
ifempty = self.config['ifempty'].get(bool)
@ -71,11 +96,24 @@ class EmbedCoverArtPlugin(BeetsPlugin):
raise ui.UserError(u'image file {0} not found'.format(
displayable_path(imagepath)
))
for item in lib.items(decargs(args)):
items = lib.items(decargs(args))
# Confirm with user.
if not opts.yes and not _confirm(items, not opts.file):
return
for item in items:
art.embed_item(self._log, item, imagepath, maxwidth, None,
compare_threshold, ifempty)
else:
for album in lib.albums(decargs(args)):
albums = lib.albums(decargs(args))
# Confirm with user.
if not opts.yes and not _confirm(albums, not opts.file):
return
for album in albums:
art.embed_album(self._log, album, maxwidth, False,
compare_threshold, ifempty)
self.remove_artfile(album)
@ -107,7 +145,7 @@ class EmbedCoverArtPlugin(BeetsPlugin):
else:
filename = bytestring_path(opts.filename or
config['art_filename'].get())
if os.path.dirname(filename) != '':
if os.path.dirname(filename) != b'':
self._log.error(
u"Only specify a name rather than a path for -n")
return
@ -125,8 +163,15 @@ class EmbedCoverArtPlugin(BeetsPlugin):
'clearart',
help=u'remove images from file metadata',
)
clear_cmd.parser.add_option(
u"-y", u"--yes", action="store_true", help=u"skip confirmation"
)
def clear_func(lib, opts, args):
items = lib.items(decargs(args))
# Confirm with user.
if not opts.yes and not _confirm(items, False):
return
art.clear(self._log, lib, decargs(args))
clear_cmd.func = clear_func

View file

@ -6,22 +6,51 @@
host: localhost
port: 8096
username: user
apikey: apikey
password: password
"""
from __future__ import division, absolute_import, print_function
from beets import config
from beets.plugins import BeetsPlugin
from urllib import urlencode
from urlparse import urljoin, parse_qs, urlsplit, urlunsplit
import hashlib
import requests
from six.moves.urllib.parse import urlencode
from six.moves.urllib.parse import urljoin, parse_qs, urlsplit, urlunsplit
from beets import config
from beets.plugins import BeetsPlugin
def api_url(host, port, endpoint):
"""Returns a joined url.
Takes host, port and endpoint and generates a valid emby API url.
:param host: Hostname of the emby server
:param port: Portnumber of the emby server
:param endpoint: API endpoint
:type host: str
:type port: int
:type endpoint: str
:returns: Full API url
:rtype: str
"""
joined = urljoin('http://{0}:{1}'.format(host, port), endpoint)
# check if http or https is defined as host and create hostname
hostname_list = [host]
if host.startswith('http://') or host.startswith('https://'):
hostname = ''.join(hostname_list)
else:
hostname_list.insert(0, 'http://')
hostname = ''.join(hostname_list)
joined = urljoin(
'{hostname}:{port}'.format(
hostname=hostname,
port=port
),
endpoint
)
scheme, netloc, path, query_string, fragment = urlsplit(joined)
query_params = parse_qs(query_string)
@ -33,34 +62,62 @@ def api_url(host, port, endpoint):
def password_data(username, password):
"""Returns a dict with username and its encoded password.
:param username: Emby username
:param password: Emby password
:type username: str
:type password: str
:returns: Dictionary with username and encoded password
:rtype: dict
"""
return {
'username': username,
'password': hashlib.sha1(password).hexdigest(),
'passwordMd5': hashlib.md5(password).hexdigest()
'password': hashlib.sha1(password.encode('utf-8')).hexdigest(),
'passwordMd5': hashlib.md5(password.encode('utf-8')).hexdigest()
}
def create_headers(user_id, token=None):
"""Return header dict that is needed to talk to the Emby API.
:param user_id: Emby user ID
:param token: Authentication token for Emby
:type user_id: str
:type token: str
:returns: Headers for requests
:rtype: dict
"""
headers = {
'Authorization': 'MediaBrowser',
'UserId': user_id,
'Client': 'other',
'Device': 'empy',
'DeviceId': 'beets',
'Version': '0.0.0'
}
headers = {}
authorization = (
'MediaBrowser UserId="{user_id}", '
'Client="other", '
'Device="beets", '
'DeviceId="beets", '
'Version="0.0.0"'
).format(user_id=user_id)
headers['x-emby-authorization'] = authorization
if token:
headers['X-MediaBrowser-Token'] = token
headers['x-mediabrowser-token'] = token
return headers
def get_token(host, port, headers, auth_data):
"""Return token for a user.
:param host: Emby host
:param port: Emby port
:param headers: Headers for requests
:param auth_data: Username and encoded password for authentication
:type host: str
:type port: int
:type headers: dict
:type auth_data: dict
:returns: Access Token
:rtype: str
"""
url = api_url(host, port, '/Users/AuthenticateByName')
r = requests.post(url, headers=headers, data=auth_data)
@ -70,6 +127,15 @@ def get_token(host, port, headers, auth_data):
def get_user(host, port, username):
"""Return user dict from server or None if there is no user.
:param host: Emby host
:param port: Emby port
:param username: Username
:type host: str
:type port: int
:type username: str
:returns: Matched Users
:rtype: list
"""
url = api_url(host, port, '/Users/Public')
r = requests.get(url)
@ -84,8 +150,10 @@ class EmbyUpdate(BeetsPlugin):
# Adding defaults.
config['emby'].add({
u'host': u'localhost',
u'port': 8096
u'host': u'http://localhost',
u'port': 8096,
u'apikey': None,
u'password': None,
})
self.register_listener('database_change', self.listen_for_db_change)
@ -104,6 +172,12 @@ class EmbyUpdate(BeetsPlugin):
port = config['emby']['port'].get()
username = config['emby']['username'].get()
password = config['emby']['password'].get()
token = config['emby']['apikey'].get()
# Check if at least a apikey or password is given.
if not any([password, token]):
self._log.warning(u'Provide at least Emby password or apikey.')
return
# Get user information from the Emby API.
user = get_user(host, port, username)
@ -111,6 +185,7 @@ class EmbyUpdate(BeetsPlugin):
self._log.warning(u'User {0} could not be found.'.format(username))
return
if not token:
# Create Authentication data and headers.
auth_data = password_data(username, password)
headers = create_headers(user[0]['Id'])

View file

@ -29,8 +29,11 @@ from beets import importer
from beets import ui
from beets import util
from beets import config
from beets.mediafile import image_mime_type
from beets.util.artresizer import ArtResizer
from beets.util import confit
from beets.util import syspath, bytestring_path, py3_path
import six
try:
import itunes
@ -38,9 +41,11 @@ try:
except ImportError:
HAVE_ITUNES = False
IMAGE_EXTENSIONS = ['png', 'jpg', 'jpeg']
CONTENT_TYPES = ('image/jpeg', 'image/png')
DOWNLOAD_EXTENSION = '.jpg'
CONTENT_TYPES = {
'image/jpeg': [b'jpg', b'jpeg'],
'image/png': [b'png']
}
IMAGE_EXTENSIONS = [ext for exts in CONTENT_TYPES.values() for ext in exts]
class Candidate(object):
@ -64,7 +69,7 @@ class Candidate(object):
self.match = match
self.size = size
def _validate(self, extra):
def _validate(self, plugin):
"""Determine whether the candidate artwork is valid based on
its dimensions (width and ratio).
@ -75,9 +80,7 @@ class Candidate(object):
if not self.path:
return self.CANDIDATE_BAD
if not (extra['enforce_ratio'] or
extra['minwidth'] or
extra['maxwidth']):
if not (plugin.enforce_ratio or plugin.minwidth or plugin.maxwidth):
return self.CANDIDATE_EXACT
# get_size returns None if no local imaging backend is available
@ -96,22 +99,22 @@ class Candidate(object):
long_edge = max(self.size)
# Check minimum size.
if extra['minwidth'] and self.size[0] < extra['minwidth']:
if plugin.minwidth and self.size[0] < plugin.minwidth:
self._log.debug(u'image too small ({} < {})',
self.size[0], extra['minwidth'])
self.size[0], plugin.minwidth)
return self.CANDIDATE_BAD
# Check aspect ratio.
edge_diff = long_edge - short_edge
if extra['enforce_ratio']:
if extra['margin_px']:
if edge_diff > extra['margin_px']:
if plugin.enforce_ratio:
if plugin.margin_px:
if edge_diff > plugin.margin_px:
self._log.debug(u'image is not close enough to being '
u'square, ({} - {} > {})',
long_edge, short_edge, extra['margin_px'])
long_edge, short_edge, plugin.margin_px)
return self.CANDIDATE_BAD
elif extra['margin_percent']:
margin_px = extra['margin_percent'] * long_edge
elif plugin.margin_percent:
margin_px = plugin.margin_percent * long_edge
if edge_diff > margin_px:
self._log.debug(u'image is not close enough to being '
u'square, ({} - {} > {})',
@ -124,20 +127,20 @@ class Candidate(object):
return self.CANDIDATE_BAD
# Check maximum size.
if extra['maxwidth'] and self.size[0] > extra['maxwidth']:
if plugin.maxwidth and self.size[0] > plugin.maxwidth:
self._log.debug(u'image needs resizing ({} > {})',
self.size[0], extra['maxwidth'])
self.size[0], plugin.maxwidth)
return self.CANDIDATE_DOWNSCALE
return self.CANDIDATE_EXACT
def validate(self, extra):
self.check = self._validate(extra)
def validate(self, plugin):
self.check = self._validate(plugin)
return self.check
def resize(self, extra):
if extra['maxwidth'] and self.check == self.CANDIDATE_DOWNSCALE:
self.path = ArtResizer.shared.resize(extra['maxwidth'], self.path)
def resize(self, plugin):
if plugin.maxwidth and self.check == self.CANDIDATE_DOWNSCALE:
self.path = ArtResizer.shared.resize(plugin.maxwidth, self.path)
def _logged_get(log, *args, **kwargs):
@ -189,17 +192,20 @@ class RequestMixin(object):
# ART SOURCES ################################################################
class ArtSource(RequestMixin):
def __init__(self, log, config):
VALID_MATCHING_CRITERIA = ['default']
def __init__(self, log, config, match_by=None):
self._log = log
self._config = config
self.match_by = match_by or self.VALID_MATCHING_CRITERIA
def get(self, album, extra):
def get(self, album, plugin, paths):
raise NotImplementedError()
def _candidate(self, **kwargs):
return Candidate(source=self, log=self._log, **kwargs)
def fetch_image(self, candidate, extra):
def fetch_image(self, candidate, plugin):
raise NotImplementedError()
@ -207,7 +213,7 @@ class LocalArtSource(ArtSource):
IS_LOCAL = True
LOC_STR = u'local'
def fetch_image(self, candidate, extra):
def fetch_image(self, candidate, plugin):
pass
@ -215,58 +221,94 @@ class RemoteArtSource(ArtSource):
IS_LOCAL = False
LOC_STR = u'remote'
def fetch_image(self, candidate, extra):
def fetch_image(self, candidate, plugin):
"""Downloads an image from a URL and checks whether it seems to
actually be an image. If so, returns a path to the downloaded image.
Otherwise, returns None.
"""
if extra['maxwidth']:
candidate.url = ArtResizer.shared.proxy_url(extra['maxwidth'],
if plugin.maxwidth:
candidate.url = ArtResizer.shared.proxy_url(plugin.maxwidth,
candidate.url)
try:
with closing(self.request(candidate.url, stream=True,
message=u'downloading image')) as resp:
if 'Content-Type' not in resp.headers \
or resp.headers['Content-Type'] not in CONTENT_TYPES:
self._log.debug(
u'not a supported image: {}',
resp.headers.get('Content-Type') or u'no content type',
)
candidate.path = None
ct = resp.headers.get('Content-Type', None)
# Download the image to a temporary file. As some servers
# (notably fanart.tv) have proven to return wrong Content-Types
# when images were uploaded with a bad file extension, do not
# rely on it. Instead validate the type using the file magic
# and only then determine the extension.
data = resp.iter_content(chunk_size=1024)
header = b''
for chunk in data:
header += chunk
if len(header) >= 32:
# The imghdr module will only read 32 bytes, and our
# own additions in mediafile even less.
break
else:
# server didn't return enough data, i.e. corrupt image
return
# Generate a temporary file with the correct extension.
with NamedTemporaryFile(suffix=DOWNLOAD_EXTENSION,
delete=False) as fh:
for chunk in resp.iter_content(chunk_size=1024):
real_ct = image_mime_type(header)
if real_ct is None:
# detection by file magic failed, fall back to the
# server-supplied Content-Type
# Is our type detection failsafe enough to drop this?
real_ct = ct
if real_ct not in CONTENT_TYPES:
self._log.debug(u'not a supported image: {}',
real_ct or u'unknown content type')
return
ext = b'.' + CONTENT_TYPES[real_ct][0]
if real_ct != ct:
self._log.warning(u'Server specified {}, but returned a '
u'{} image. Correcting the extension '
u'to {}',
ct, real_ct, ext)
suffix = py3_path(ext)
with NamedTemporaryFile(suffix=suffix, delete=False) as fh:
# write the first already loaded part of the image
fh.write(header)
# download the remaining part of the image
for chunk in data:
fh.write(chunk)
self._log.debug(u'downloaded art to: {0}',
util.displayable_path(fh.name))
candidate.path = fh.name
candidate.path = util.bytestring_path(fh.name)
return
except (IOError, requests.RequestException, TypeError) as exc:
# Handling TypeError works around a urllib3 bug:
# https://github.com/shazow/urllib3/issues/556
self._log.debug(u'error fetching art: {}', exc)
candidate.path = None
return
class CoverArtArchive(RemoteArtSource):
NAME = u"Cover Art Archive"
VALID_MATCHING_CRITERIA = ['release', 'releasegroup']
if util.SNI_SUPPORTED:
URL = 'https://coverartarchive.org/release/{mbid}/front'
GROUP_URL = 'https://coverartarchive.org/release-group/{mbid}/front'
else:
URL = 'http://coverartarchive.org/release/{mbid}/front'
GROUP_URL = 'http://coverartarchive.org/release-group/{mbid}/front'
def get(self, album, extra):
def get(self, album, plugin, paths):
"""Return the Cover Art Archive and Cover Art Archive release group URLs
using album MusicBrainz release ID and release group ID.
"""
if album.mb_albumid:
if 'release' in self.match_by and album.mb_albumid:
yield self._candidate(url=self.URL.format(mbid=album.mb_albumid),
match=Candidate.MATCH_EXACT)
if album.mb_releasegroupid:
if 'releasegroup' in self.match_by and album.mb_releasegroupid:
yield self._candidate(
url=self.GROUP_URL.format(mbid=album.mb_releasegroupid),
match=Candidate.MATCH_FALLBACK)
@ -277,7 +319,7 @@ class Amazon(RemoteArtSource):
URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg'
INDICES = (1, 2)
def get(self, album, extra):
def get(self, album, plugin, paths):
"""Generate URLs using Amazon ID (ASIN) string.
"""
if album.asin:
@ -291,7 +333,7 @@ class AlbumArtOrg(RemoteArtSource):
URL = 'http://www.albumart.org/index_detail.php'
PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"'
def get(self, album, extra):
def get(self, album, plugin, paths):
"""Return art URL from AlbumArt.org using album ASIN.
"""
if not album.asin:
@ -322,7 +364,7 @@ class GoogleImages(RemoteArtSource):
self.key = self._config['google_key'].get(),
self.cx = self._config['google_engine'].get(),
def get(self, album, extra):
def get(self, album, plugin, paths):
"""Return art URL from google custom search engine
given an album title and interpreter.
"""
@ -358,8 +400,7 @@ class GoogleImages(RemoteArtSource):
class FanartTV(RemoteArtSource):
"""Art from fanart.tv requested using their API"""
NAME = u"fanart.tv"
API_URL = 'http://webservice.fanart.tv/v3/'
API_URL = 'https://webservice.fanart.tv/v3/'
API_ALBUMS = API_URL + 'music/albums/'
PROJECT_KEY = '61a7d0ab4e67162b7a0c7c35915cd48e'
@ -367,7 +408,7 @@ class FanartTV(RemoteArtSource):
super(FanartTV, self).__init__(*args, **kwargs)
self.client_key = self._config['fanarttv_key'].get()
def get(self, album, extra):
def get(self, album, plugin, paths):
if not album.mb_releasegroupid:
return
@ -418,7 +459,7 @@ class FanartTV(RemoteArtSource):
class ITunesStore(RemoteArtSource):
NAME = u"iTunes Store"
def get(self, album, extra):
def get(self, album, plugin, paths):
"""Return art URL from iTunes Store given an album title.
"""
if not (album.albumartist and album.album):
@ -452,8 +493,8 @@ class ITunesStore(RemoteArtSource):
class Wikipedia(RemoteArtSource):
NAME = u"Wikipedia (queried through DBpedia)"
DBPEDIA_URL = 'http://dbpedia.org/sparql'
WIKIPEDIA_URL = 'http://en.wikipedia.org/w/api.php'
DBPEDIA_URL = 'https://dbpedia.org/sparql'
WIKIPEDIA_URL = 'https://en.wikipedia.org/w/api.php'
SPARQL_QUERY = u'''PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
PREFIX dbpprop: <http://dbpedia.org/property/>
PREFIX owl: <http://dbpedia.org/ontology/>
@ -476,7 +517,7 @@ class Wikipedia(RemoteArtSource):
}}
Limit 1'''
def get(self, album, extra):
def get(self, album, plugin, paths):
if not (album.albumartist and album.album):
return
@ -566,7 +607,7 @@ class Wikipedia(RemoteArtSource):
try:
data = wikipedia_response.json()
results = data['query']['pages']
for _, result in results.iteritems():
for _, result in results.items():
image_url = result['imageinfo'][0]['url']
yield self._candidate(url=image_url,
match=Candidate.MATCH_EXACT)
@ -588,26 +629,26 @@ class FileSystem(LocalArtSource):
"""
return [idx for (idx, x) in enumerate(cover_names) if x in filename]
def get(self, album, extra):
def get(self, album, plugin, paths):
"""Look for album art files in the specified directories.
"""
paths = extra['paths']
if not paths:
return
cover_names = extra['cover_names']
cover_pat = br"(\b|_)({0})(\b|_)".format(b'|'.join(cover_names))
cautious = extra['cautious']
cover_names = list(map(util.bytestring_path, plugin.cover_names))
cover_names_str = b'|'.join(cover_names)
cover_pat = br''.join([br"(\b|_)(", cover_names_str, br")(\b|_)"])
for path in paths:
if not os.path.isdir(path):
if not os.path.isdir(syspath(path)):
continue
# Find all files that look like images in the directory.
images = []
for fn in os.listdir(path):
for fn in os.listdir(syspath(path)):
fn = bytestring_path(fn)
for ext in IMAGE_EXTENSIONS:
if fn.lower().endswith(b'.' + ext.encode('utf8')) and \
os.path.isfile(os.path.join(path, fn)):
if fn.lower().endswith(b'.' + ext) and \
os.path.isfile(syspath(os.path.join(path, fn))):
images.append(fn)
# Look for "preferred" filenames.
@ -625,7 +666,7 @@ class FileSystem(LocalArtSource):
remaining.append(fn)
# Fall back to any image in the folder.
if remaining and not cautious:
if remaining and not plugin.cautious:
self._log.debug(u'using fallback art file {0}',
util.displayable_path(remaining[0]))
yield self._candidate(path=os.path.join(path, remaining[0]),
@ -691,7 +732,7 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
confit.String(pattern=self.PAT_PERCENT)]))
self.margin_px = None
self.margin_percent = None
if type(self.enforce_ratio) is unicode:
if type(self.enforce_ratio) is six.text_type:
if self.enforce_ratio[-1] == u'%':
self.margin_percent = float(self.enforce_ratio[:-1]) / 100
elif self.enforce_ratio[-2:] == u'px':
@ -702,7 +743,7 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
self.enforce_ratio = True
cover_names = self.config['cover_names'].as_str_seq()
self.cover_names = map(util.bytestring_path, cover_names)
self.cover_names = list(map(util.bytestring_path, cover_names))
self.cautious = self.config['cautious'].get(bool)
self.store_source = self.config['store_source'].get(bool)
@ -720,20 +761,30 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
if not self.config['google_key'].get() and \
u'google' in available_sources:
available_sources.remove(u'google')
sources_name = plugins.sanitize_choices(
self.config['sources'].as_str_seq(), available_sources)
available_sources = [(s, c)
for s in available_sources
for c in ART_SOURCES[s].VALID_MATCHING_CRITERIA]
sources = plugins.sanitize_pairs(
self.config['sources'].as_pairs(default_value='*'),
available_sources)
if 'remote_priority' in self.config:
self._log.warning(
u'The `fetch_art.remote_priority` configuration option has '
u'been deprecated, see the documentation.')
u'been deprecated. Instead, place `filesystem` at the end of '
u'your `sources` list.')
if self.config['remote_priority'].get(bool):
try:
sources_name.remove(u'filesystem')
sources_name.append(u'filesystem')
except ValueError:
pass
self.sources = [ART_SOURCES[s](self._log, self.config)
for s in sources_name]
fs = []
others = []
for s, c in sources:
if s == 'filesystem':
fs.append((s, c))
else:
others.append((s, c))
sources = others + fs
self.sources = [ART_SOURCES[s](self._log, self.config, match_by=[c])
for s, c in sources]
# Asynchronous; after music is added to the library.
def fetch_art(self, session, task):
@ -745,7 +796,8 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
if task.choice_flag == importer.action.ASIS:
# For as-is imports, don't search Web sources for art.
local = True
elif task.choice_flag == importer.action.APPLY:
elif task.choice_flag in (importer.action.APPLY,
importer.action.RETAG):
# Search everywhere for art.
local = False
else:
@ -786,9 +838,15 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
action='store_true', default=False,
help=u're-download art when already present'
)
cmd.parser.add_option(
u'-q', u'--quiet', dest='quiet',
action='store_true', default=False,
help=u'shows only quiet art'
)
def func(lib, opts, args):
self.batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force)
self.batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force,
opts.quiet)
cmd.func = func
return [cmd]
@ -803,16 +861,6 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
"""
out = None
# all the information any of the sources might need
extra = {'paths': paths,
'cover_names': self.cover_names,
'cautious': self.cautious,
'enforce_ratio': self.enforce_ratio,
'margin_px': self.margin_px,
'margin_percent': self.margin_percent,
'minwidth': self.minwidth,
'maxwidth': self.maxwidth}
for source in self.sources:
if source.IS_LOCAL or not local_only:
self._log.debug(
@ -822,9 +870,9 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
)
# URLs might be invalid at this point, or the image may not
# fulfill the requirements
for candidate in source.get(album, extra):
source.fetch_image(candidate, extra)
if candidate.validate(extra):
for candidate in source.get(album, self, paths):
source.fetch_image(candidate, self)
if candidate.validate(self):
out = candidate
self._log.debug(
u'using {0.LOC_STR} image {1}'.format(
@ -834,17 +882,20 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
break
if out:
out.resize(extra)
out.resize(self)
return out
def batch_fetch_art(self, lib, albums, force):
def batch_fetch_art(self, lib, albums, force, quiet):
"""Fetch album art for each of the albums. This implements the manual
fetchart CLI command.
"""
for album in albums:
if album.artpath and not force and os.path.isfile(album.artpath):
message = ui.colorize('text_highlight_minor', u'has album art')
if not quiet:
message = ui.colorize('text_highlight_minor',
u'has album art')
self._log.info(u'{0}: {1}', album, message)
else:
# In ordinary invocations, look for images on the
# filesystem. When forcing, however, always go to the Web
@ -857,5 +908,4 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
message = ui.colorize('text_success', u'found album art')
else:
message = ui.colorize('text_error', u'no art found')
self._log.info(u'{0}: {1}', album, message)

View file

@ -20,6 +20,7 @@ from __future__ import division, absolute_import, print_function
import re
from beets import config
from beets.util import bytestring_path
from beets.plugins import BeetsPlugin
from beets.importer import SingletonImportTask
@ -35,14 +36,15 @@ class FileFilterPlugin(BeetsPlugin):
self.path_album_regex = \
self.path_singleton_regex = \
re.compile(self.config['path'].get())
re.compile(bytestring_path(self.config['path'].get()))
if 'album_path' in self.config:
self.path_album_regex = re.compile(self.config['album_path'].get())
self.path_album_regex = re.compile(
bytestring_path(self.config['album_path'].get()))
if 'singleton_path' in self.config:
self.path_singleton_regex = re.compile(
self.config['singleton_path'].get())
bytestring_path(self.config['singleton_path'].get()))
def import_task_created_event(self, session, task):
if task.items and len(task.items) > 0:
@ -69,6 +71,7 @@ class FileFilterPlugin(BeetsPlugin):
of the file given in full_path.
"""
import_config = dict(config['import'])
full_path = bytestring_path(full_path)
if 'singletons' not in import_config or not import_config[
'singletons']:
# Album

View file

@ -22,34 +22,26 @@ from beets import plugins
from beets.util import displayable_path
import os
import re
import six
# Filename field extraction patterns.
PATTERNS = [
# "01 - Track 01" and "01": do nothing
r'^(\d+)\s*-\s*track\s*\d$',
r'^\d+$',
# Useful patterns.
r'^(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
r'^(?P<track>\d+)\s*-(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
r'^(?P<track>\d+)\s(?P<artist>.+)-(?P<title>.+)-(?P<tag>.*)$',
r'^(?P<artist>.+)-(?P<title>.+)$',
r'^(?P<track>\d+)\.\s*(?P<artist>.+)-(?P<title>.+)$',
r'^(?P<track>\d+)\s*-\s*(?P<artist>.+)-(?P<title>.+)$',
r'^(?P<track>\d+)\s*-(?P<artist>.+)-(?P<title>.+)$',
r'^(?P<track>\d+)\s(?P<artist>.+)-(?P<title>.+)$',
r'^(?P<artist>.+)[\-_](?P<title>.+)[\-_](?P<tag>.*)$',
r'^(?P<track>\d+)[\s.\-_]+(?P<artist>.+)[\-_](?P<title>.+)[\-_](?P<tag>.*)$',
r'^(?P<artist>.+)[\-_](?P<title>.+)$',
r'^(?P<track>\d+)[\s.\-_]+(?P<artist>.+)[\-_](?P<title>.+)$',
r'^(?P<title>.+)$',
r'^(?P<track>\d+)\.\s*(?P<title>.+)$',
r'^(?P<track>\d+)\s*-\s*(?P<title>.+)$',
r'^(?P<track>\d+)\s(?P<title>.+)$',
r'^(?P<track>\d+)[\s.\-_]+(?P<title>.+)$',
r'^(?P<track>\d+)\s+(?P<title>.+)$',
r'^(?P<title>.+) by (?P<artist>.+)$',
r'^(?P<track>\d+).*$',
]
# Titles considered "empty" and in need of replacement.
BAD_TITLE_PATTERNS = [
r'^$',
r'\d+?\s?-?\s*track\s*\d+',
]
@ -100,7 +92,7 @@ def apply_matches(d):
"""Given a mapping from items to field dicts, apply the fields to
the objects.
"""
some_map = d.values()[0]
some_map = list(d.values())[0]
keys = some_map.keys()
# Only proceed if the "tag" field is equal across all filenames.
@ -132,7 +124,7 @@ def apply_matches(d):
# Apply the title and track.
for item in d:
if bad_title(item.title):
item.title = unicode(d[item][title_field])
item.title = six.text_type(d[item][title_field])
if 'track' in d[item] and item.track == 0:
item.track = int(d[item]['track'])

View file

@ -49,29 +49,28 @@ def find_feat_part(artist, albumartist):
"""Attempt to find featured artists in the item's artist fields and
return the results. Returns None if no featured artist found.
"""
feat_part = None
# Look for the album artist in the artist field. If it's not
# present, give up.
albumartist_split = artist.split(albumartist, 1)
if len(albumartist_split) <= 1:
return feat_part
return None
# If the last element of the split (the right-hand side of the
# album artist) is nonempty, then it probably contains the
# featured artist.
elif albumartist_split[-1] != '':
elif albumartist_split[1] != '':
# Extract the featured artist from the right-hand side.
_, feat_part = split_on_feat(albumartist_split[-1])
_, feat_part = split_on_feat(albumartist_split[1])
return feat_part
# Otherwise, if there's nothing on the right-hand side, look for a
# featuring artist on the left-hand side.
else:
lhs, rhs = split_on_feat(albumartist_split[0])
if lhs:
feat_part = lhs
return lhs
return feat_part
return None
class FtInTitlePlugin(plugins.BeetsPlugin):
@ -90,7 +89,7 @@ class FtInTitlePlugin(plugins.BeetsPlugin):
self._command.parser.add_option(
u'-d', u'--drop', dest='drop',
action='store_true', default=False,
action='store_true', default=None,
help=u'drop featuring from artists and ignore title update')
if self.config['auto']:
@ -137,7 +136,7 @@ class FtInTitlePlugin(plugins.BeetsPlugin):
# Only update the title if it does not already contain a featured
# artist and if we do not drop featuring information.
if not drop_feat and not contains_feat(item.title):
feat_format = self.config['format'].get(unicode)
feat_format = self.config['format'].as_str()
new_format = feat_format.format(feat_part)
new_title = u"{0} {1}".format(item.title, new_format)
self._log.info(u'title: {0} -> {1}', item.title, new_title)

View file

@ -44,5 +44,5 @@ class FuzzyPlugin(BeetsPlugin):
})
def queries(self):
prefix = self.config['prefix'].get(basestring)
prefix = self.config['prefix'].as_str()
return {prefix: FuzzyQuery}

96
libs/beetsplug/gmusic.py Normal file
View file

@ -0,0 +1,96 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2017, Tigran Kostandyan.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Upload files to Google Play Music and list songs in its library."""
from __future__ import absolute_import, division, print_function
import os.path
from beets.plugins import BeetsPlugin
from beets import ui
from beets import config
from beets.ui import Subcommand
from gmusicapi import Musicmanager, Mobileclient
from gmusicapi.exceptions import NotLoggedIn
import gmusicapi.clients
class Gmusic(BeetsPlugin):
    """Beets plugin for Google Play Music.

    Wraps two gmusicapi clients: ``Musicmanager`` (OAuth2-based) for
    uploading files, and ``Mobileclient`` (email/password-based) for
    listing and searching the library, since Musicmanager does not
    expose library management.
    """

    def __init__(self):
        super(Gmusic, self).__init__()
        # Checks for OAuth2 credentials,
        # if they don't exist - performs authorization
        self.m = Musicmanager()
        if os.path.isfile(gmusicapi.clients.OAUTH_FILEPATH):
            self.m.login()
        else:
            # No stored credentials: run the interactive OAuth flow once.
            self.m.perform_oauth()

    def commands(self):
        """Expose the `gmusic-upload` and `gmusic-songs` CLI subcommands."""
        gupload = Subcommand('gmusic-upload',
                             help=u'upload your tracks to Google Play Music')
        gupload.func = self.upload
        search = Subcommand('gmusic-songs',
                            help=u'list of songs in Google Play Music library'
                            )
        search.parser.add_option('-t', '--track', dest='track',
                                 action='store_true',
                                 help='Search by track name')
        search.parser.add_option('-a', '--artist', dest='artist',
                                 action='store_true',
                                 help='Search by artist')
        search.func = self.search
        return [gupload, search]

    def upload(self, lib, opts, args):
        """Upload the files of all items matching the beets query in
        ``args`` via the Music Manager client.
        """
        items = lib.items(ui.decargs(args))
        # Musicmanager expects filesystem paths as text, not bytes.
        files = [x.path.decode('utf-8') for x in items]
        ui.print_(u'Uploading your files...')
        self.m.upload(filepaths=files)
        ui.print_(u'Your files were successfully added to library')

    def search(self, lib, opts, args):
        """List the Google Play Music library, or filter it by track
        title (``-t``) or artist (default) when a query is given.
        """
        password = config['gmusic']['password']
        email = config['gmusic']['email']
        # Hide the credentials when the config is dumped/printed.
        password.redact = True
        email.redact = True
        # Since Musicmanager doesn't support library management
        # we need to use mobileclient interface
        mobile = Mobileclient()
        try:
            mobile.login(email.as_str(), password.as_str(),
                         Mobileclient.FROM_MAC_ADDRESS)
            files = mobile.get_all_songs()
        except NotLoggedIn:
            ui.print_(
                u'Authentication error. Please check your email and password.'
            )
            return
        if not args:
            # No query: print the entire library, numbered from 1.
            for i, file in enumerate(files, start=1):
                print(i, ui.colorize('blue', file['artist']),
                      file['title'], ui.colorize('red', file['album']))
        else:
            if opts.track:
                self.match(files, args, 'title')
            else:
                self.match(files, args, 'artist')

    @staticmethod
    def match(files, args, search_by):
        """Print every song dict whose ``search_by`` field contains the
        space-joined query string (case-sensitive substring match).
        """
        for file in files:
            if ' '.join(ui.decargs(args)) in file[search_by]:
                print(file['artist'], file['title'], file['album'])

View file

@ -1,3 +1,4 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
@ -17,15 +18,20 @@ from __future__ import division, absolute_import, print_function
import string
import subprocess
import six
from beets.plugins import BeetsPlugin
from beets.ui import _arg_encoding
from beets.util import shlex_split
from beets.util import shlex_split, arg_encoding
class CodingFormatter(string.Formatter):
"""A custom string formatter that decodes the format string and it's
fields.
"""A variant of `string.Formatter` that converts everything to `unicode`
strings.
This is necessary on Python 2, where formatting otherwise occurs on
bytestrings. It intercepts two points in the formatting process to decode
the format string and all fields using the specified encoding. If decoding
fails, the values are used as-is.
"""
def __init__(self, coding):
@ -57,10 +63,9 @@ class CodingFormatter(string.Formatter):
"""
converted = super(CodingFormatter, self).convert_field(value,
conversion)
try:
converted = converted.decode(self._coding)
except UnicodeEncodeError:
pass
if isinstance(converted, bytes):
return converted.decode(self._coding)
return converted
@ -79,8 +84,8 @@ class HookPlugin(BeetsPlugin):
for hook_index in range(len(hooks)):
hook = self.config['hooks'][hook_index]
hook_event = hook['event'].get(unicode)
hook_command = hook['command'].get(unicode)
hook_event = hook['event'].as_str()
hook_command = hook['command'].as_str()
self.create_and_register_hook(hook_event, hook_command)
@ -90,7 +95,12 @@ class HookPlugin(BeetsPlugin):
self._log.error('invalid command "{0}"', command)
return
formatter = CodingFormatter(_arg_encoding())
# Use a string formatter that works on Unicode strings.
if six.PY2:
formatter = CodingFormatter(arg_encoding())
else:
formatter = string.Formatter()
command_pieces = shlex_split(command)
for i, piece in enumerate(command_pieces):

View file

@ -30,12 +30,13 @@ class ImportAddedPlugin(BeetsPlugin):
self.item_mtime = dict()
register = self.register_listener
register('import_task_start', self.check_config)
register('import_task_start', self.record_if_inplace)
register('import_task_created', self.check_config)
register('import_task_created', self.record_if_inplace)
register('import_task_files', self.record_reimported)
register('before_item_moved', self.record_import_mtime)
register('item_copied', self.record_import_mtime)
register('item_linked', self.record_import_mtime)
register('item_hardlinked', self.record_import_mtime)
register('album_imported', self.update_album_times)
register('item_imported', self.update_item_times)
register('after_write', self.update_after_write_time)
@ -51,7 +52,7 @@ class ImportAddedPlugin(BeetsPlugin):
def record_if_inplace(self, task, session):
if not (session.config['copy'] or session.config['move'] or
session.config['link']):
session.config['link'] or session.config['hardlink']):
self._log.debug(u"In place import detected, recording mtimes from "
u"source paths")
items = [task.item] \
@ -62,7 +63,7 @@ class ImportAddedPlugin(BeetsPlugin):
def record_reimported(self, task, session):
self.reimported_item_ids = set(item.id for item, replaced_items
in task.replaced_items.iteritems()
in task.replaced_items.items()
if replaced_items)
self.replaced_album_paths = set(task.replaced_albums.keys())

View file

@ -24,26 +24,12 @@ import os
import re
from beets.plugins import BeetsPlugin
from beets.util import mkdirall, normpath, syspath, bytestring_path
from beets.util import mkdirall, normpath, syspath, bytestring_path, link
from beets import config
M3U_DEFAULT_NAME = 'imported.m3u'
def _get_feeds_dir(lib):
"""Given a Library object, return the path to the feeds directory to be
used (either in the library directory or an explicitly configured
path). Ensures that the directory exists.
"""
# Inside library directory.
dirpath = lib.directory
# Ensure directory exists.
if not os.path.exists(syspath(dirpath)):
os.makedirs(syspath(dirpath))
return dirpath
def _build_m3u_filename(basename):
"""Builds unique m3u filename by appending given basename to current
date."""
@ -61,7 +47,7 @@ def _write_m3u(m3u_path, items_paths):
"""Append relative paths to items into m3u file.
"""
mkdirall(m3u_path)
with open(syspath(m3u_path), 'a') as f:
with open(syspath(m3u_path), 'ab') as f:
for path in items_paths:
f.write(path + b'\n')
@ -78,30 +64,28 @@ class ImportFeedsPlugin(BeetsPlugin):
'absolute_path': False,
})
feeds_dir = self.config['dir'].get()
if feeds_dir:
feeds_dir = os.path.expanduser(bytestring_path(feeds_dir))
self.config['dir'] = feeds_dir
if not os.path.exists(syspath(feeds_dir)):
os.makedirs(syspath(feeds_dir))
relative_to = self.config['relative_to'].get()
if relative_to:
self.config['relative_to'] = normpath(relative_to)
else:
self.config['relative_to'] = feeds_dir
self.config['relative_to'] = self.get_feeds_dir()
self.register_listener('library_opened', self.library_opened)
self.register_listener('album_imported', self.album_imported)
self.register_listener('item_imported', self.item_imported)
def get_feeds_dir(self):
feeds_dir = self.config['dir'].get()
if feeds_dir:
return os.path.expanduser(bytestring_path(feeds_dir))
return config['directory'].as_filename()
def _record_items(self, lib, basename, items):
"""Records relative paths to the given items for each feed format
"""
feedsdir = bytestring_path(self.config['dir'].as_filename())
feedsdir = bytestring_path(self.get_feeds_dir())
formats = self.config['formats'].as_str_seq()
relative_to = self.config['relative_to'].get() \
or self.config['dir'].as_filename()
or self.get_feeds_dir()
relative_to = bytestring_path(relative_to)
paths = []
@ -119,7 +103,7 @@ class ImportFeedsPlugin(BeetsPlugin):
if 'm3u' in formats:
m3u_basename = bytestring_path(
self.config['m3u_name'].get(unicode))
self.config['m3u_name'].as_str())
m3u_path = os.path.join(feedsdir, m3u_basename)
_write_m3u(m3u_path, paths)
@ -131,17 +115,13 @@ class ImportFeedsPlugin(BeetsPlugin):
for path in paths:
dest = os.path.join(feedsdir, os.path.basename(path))
if not os.path.exists(syspath(dest)):
os.symlink(syspath(path), syspath(dest))
link(path, dest)
if 'echo' in formats:
self._log.info(u"Location of imported music:")
for path in paths:
self._log.info(u" {0}", path)
def library_opened(self, lib):
if self.config['dir'].get() is None:
self.config['dir'] = _get_feeds_dir(lib)
def album_imported(self, lib, album):
self._record_items(lib, album.album, album.items())

View file

@ -73,7 +73,7 @@ def library_data_emitter(item):
def update_summary(summary, tags):
for key, value in tags.iteritems():
for key, value in tags.items():
if key not in summary:
summary[key] = value
elif summary[key] != value:
@ -96,7 +96,7 @@ def print_data(data, item=None, fmt=None):
path = displayable_path(item.path) if item else None
formatted = {}
for key, value in data.iteritems():
for key, value in data.items():
if isinstance(value, list):
formatted[key] = u'; '.join(value)
if value is not None:
@ -123,7 +123,7 @@ def print_data_keys(data, item=None):
"""
path = displayable_path(item.path) if item else None
formatted = []
for key, value in data.iteritems():
for key, value in data.items():
formatted.append(key)
if len(formatted) == 0:
@ -204,7 +204,8 @@ class InfoPlugin(BeetsPlugin):
if opts.keys_only:
print_data_keys(data, item)
else:
print_data(data, item, opts.format)
fmt = ui.decargs([opts.format])[0] if opts.format else None
print_data(data, item, fmt)
first = False
if opts.summarize:
@ -230,7 +231,7 @@ def make_key_filter(include):
def filter_(data):
filtered = dict()
for key, value in data.items():
if any(map(lambda m: m.match(key), matchers)):
if any([m.match(key) for m in matchers]):
filtered[key] = value
return filtered

View file

@ -22,6 +22,7 @@ import itertools
from beets.plugins import BeetsPlugin
from beets import config
import six
FUNC_NAME = u'__INLINE_FUNC__'
@ -32,7 +33,7 @@ class InlineError(Exception):
def __init__(self, code, exc):
super(InlineError, self).__init__(
(u"error in inline path field code:\n"
u"%s\n%s: %s") % (code, type(exc).__name__, unicode(exc))
u"%s\n%s: %s") % (code, type(exc).__name__, six.text_type(exc))
)
@ -64,14 +65,14 @@ class InlinePlugin(BeetsPlugin):
for key, view in itertools.chain(config['item_fields'].items(),
config['pathfields'].items()):
self._log.debug(u'adding item field {0}', key)
func = self.compile_inline(view.get(unicode), False)
func = self.compile_inline(view.as_str(), False)
if func is not None:
self.template_fields[key] = func
# Album fields.
for key, view in config['album_fields'].items():
self._log.debug(u'adding album field {0}', key)
func = self.compile_inline(view.get(unicode), True)
func = self.compile_inline(view.as_str(), True)
if func is not None:
self.album_template_fields[key] = func

View file

@ -272,9 +272,11 @@ class IPFSPlugin(BeetsPlugin):
break
except AttributeError:
pass
item_path = os.path.basename(item.path).decode(
util._fsencoding(), 'ignore'
)
# Clear current path from item
item.path = '/ipfs/{0}/{1}'.format(album.ipfs,
os.path.basename(item.path))
item.path = '/ipfs/{0}/{1}'.format(album.ipfs, item_path)
item.id = None
items.append(item)

View file

@ -48,18 +48,18 @@ class KeyFinderPlugin(BeetsPlugin):
self.find_key(lib.items(ui.decargs(args)), write=ui.should_write())
def imported(self, session, task):
self.find_key(task.items)
self.find_key(task.imported_items())
def find_key(self, items, write=False):
overwrite = self.config['overwrite'].get(bool)
bin = util.bytestring_path(self.config['bin'].get(unicode))
bin = self.config['bin'].as_str()
for item in items:
if item['initial_key'] and not overwrite:
continue
try:
output = util.command_output([bin, b'-f',
output = util.command_output([bin, '-f',
util.syspath(item.path)])
except (subprocess.CalledProcessError, OSError) as exc:
self._log.error(u'execution failed: {0}', exc)
@ -73,7 +73,7 @@ class KeyFinderPlugin(BeetsPlugin):
key_raw = output.rsplit(None, 1)[-1]
try:
key = key_raw.decode('utf8')
key = util.text_string(key_raw)
except UnicodeDecodeError:
self._log.error(u'output is invalid UTF-8')
continue

View file

@ -0,0 +1,98 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2017, Pauli Kettunen.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates a Kodi library whenever the beets library is changed.
This is based on the Plex Update plugin.
Put something like the following in your config.yaml to configure:
kodi:
host: localhost
port: 8080
user: user
pwd: secret
"""
from __future__ import division, absolute_import, print_function
import requests
from beets import config
from beets.plugins import BeetsPlugin
import six
def update_kodi(host, port, user, password):
    """Sends request to the Kodi api to start a library refresh.

    Issues an ``AudioLibrary.Scan`` JSON-RPC call over HTTP basic auth
    and returns the ``requests.Response`` unchecked; HTTP errors are
    left for the caller to handle.
    """
    url = "http://{0}:{1}/jsonrpc".format(host, port)

    """Content-Type: application/json is mandatory
    according to the kodi jsonrpc documentation"""

    headers = {'Content-Type': 'application/json'}

    # Create the payload. Id seems to be mandatory.
    payload = {'jsonrpc': '2.0', 'method': 'AudioLibrary.Scan', 'id': 1}
    r = requests.post(
        url,
        auth=(user, password),
        json=payload,
        headers=headers)

    return r
class KodiUpdate(BeetsPlugin):
    """Trigger a Kodi music-library scan when the beets CLI exits after
    the beets database has changed.
    """

    def __init__(self):
        super(KodiUpdate, self).__init__()

        # Adding defaults.
        config['kodi'].add({
            u'host': u'localhost',
            u'port': 8080,
            u'user': u'kodi',
            u'pwd': u'kodi'})

        # Keep the password out of config dumps.
        config['kodi']['pwd'].redact = True
        self.register_listener('database_change', self.listen_for_db_change)

    def listen_for_db_change(self, lib, model):
        """Listens for beets db change and register the update"""
        # Defer the actual request until the CLI is about to exit, so at
        # most one refresh is sent per invocation.
        self.register_listener('cli_exit', self.update)

    def update(self, lib):
        """When the client exists try to send refresh request to Kodi server.
        """
        self._log.info(u'Requesting a Kodi library update...')

        # Try to send update request.
        try:
            r = update_kodi(
                config['kodi']['host'].get(),
                config['kodi']['port'].get(),
                config['kodi']['user'].get(),
                config['kodi']['pwd'].get())
            r.raise_for_status()

        except requests.exceptions.RequestException as e:
            # Network/HTTP failure: warn and give up; never crash beets.
            self._log.warning(u'Kodi update failed: {0}',
                              six.text_type(e))
            return

        # NOTE(review): local name `json` shadows any module-level `json`
        # import within this method's scope.
        json = r.json()
        if json.get('result') != 'OK':
            self._log.warning(u'Kodi update failed: JSON response was {0!r}',
                              json)
            return

        self._log.info(u'Kodi update triggered')

View file

@ -14,6 +14,7 @@
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
import six
"""Gets genres for imported music based on Last.fm tags.
@ -24,6 +25,7 @@ The scraper script used is available here:
https://gist.github.com/1241307
"""
import pylast
import codecs
import os
import yaml
import traceback
@ -71,7 +73,7 @@ def flatten_tree(elem, path, branches):
for sub in elem:
flatten_tree(sub, path, branches)
else:
branches.append(path + [unicode(elem)])
branches.append(path + [six.text_type(elem)])
def find_parents(candidate, branches):
@ -107,6 +109,7 @@ class LastGenrePlugin(plugins.BeetsPlugin):
'force': True,
'auto': True,
'separator': u', ',
'prefer_specific': False,
})
self.setup()
@ -126,9 +129,9 @@ class LastGenrePlugin(plugins.BeetsPlugin):
wl_filename = WHITELIST
if wl_filename:
wl_filename = normpath(wl_filename)
with open(wl_filename, 'r') as f:
with open(wl_filename, 'rb') as f:
for line in f:
line = line.decode('utf8').strip().lower()
line = line.decode('utf-8').strip().lower()
if line and not line.startswith(u'#'):
self.whitelist.add(line)
@ -139,7 +142,8 @@ class LastGenrePlugin(plugins.BeetsPlugin):
c14n_filename = C14N_TREE
if c14n_filename:
c14n_filename = normpath(c14n_filename)
genres_tree = yaml.load(open(c14n_filename, 'r'))
with codecs.open(c14n_filename, 'r', encoding='utf-8') as f:
genres_tree = yaml.load(f)
flatten_tree(genres_tree, [], self.c14n_branches)
@property
@ -155,6 +159,25 @@ class LastGenrePlugin(plugins.BeetsPlugin):
elif source == 'artist':
return 'artist',
def _get_depth(self, tag):
"""Find the depth of a tag in the genres tree.
"""
depth = None
for key, value in enumerate(self.c14n_branches):
if tag in value:
depth = value.index(tag)
break
return depth
def _sort_by_depth(self, tags):
"""Given a list of tags, sort the tags by their depths in the
genre tree.
"""
depth_tag_pairs = [(self._get_depth(t), t) for t in tags]
depth_tag_pairs = [e for e in depth_tag_pairs if e[0] is not None]
depth_tag_pairs.sort(reverse=True)
return [p[1] for p in depth_tag_pairs]
def _resolve_genres(self, tags):
"""Given a list of strings, return a genre by joining them into a
single string and (optionally) canonicalizing each.
@ -176,17 +199,24 @@ class LastGenrePlugin(plugins.BeetsPlugin):
parents = [find_parents(tag, self.c14n_branches)[-1]]
tags_all += parents
if len(tags_all) >= count:
# Stop if we have enough tags already, unless we need to find
# the most specific tag (instead of the most popular).
if (not self.config['prefer_specific'] and
len(tags_all) >= count):
break
tags = tags_all
tags = deduplicate(tags)
# Sort the tags by specificity.
if self.config['prefer_specific']:
tags = self._sort_by_depth(tags)
# c14n only adds allowed genres but we may have had forbidden genres in
# the original tags list
tags = [x.title() for x in tags if self._is_allowed(x)]
return self.config['separator'].get(unicode).join(
return self.config['separator'].as_str().join(
tags[:self.config['count'].get(int)]
)
@ -221,7 +251,8 @@ class LastGenrePlugin(plugins.BeetsPlugin):
if any(not s for s in args):
return None
key = u'{0}.{1}'.format(entity, u'-'.join(unicode(a) for a in args))
key = u'{0}.{1}'.format(entity,
u'-'.join(six.text_type(a) for a in args))
if key in self._genre_cache:
return self._genre_cache[key]
else:
@ -297,7 +328,7 @@ class LastGenrePlugin(plugins.BeetsPlugin):
result = None
if isinstance(obj, library.Item):
result = self.fetch_artist_genre(obj)
elif obj.albumartist != config['va_name'].get(unicode):
elif obj.albumartist != config['va_name'].as_str():
result = self.fetch_album_artist_genre(obj)
else:
# For "Various Artists", pick the most popular track genre.
@ -400,7 +431,7 @@ class LastGenrePlugin(plugins.BeetsPlugin):
"""
# Work around an inconsistency in pylast where
# Album.get_top_tags() does not return TopItem instances.
# https://code.google.com/p/pylast/issues/detail?id=85
# https://github.com/pylast/pylast/issues/86
if isinstance(obj, pylast.Album):
obj = super(pylast.Album, obj)

View file

@ -23,7 +23,7 @@ from beets import config
from beets import plugins
from beets.dbcore import types
API_URL = 'http://ws.audioscrobbler.com/2.0/'
API_URL = 'https://ws.audioscrobbler.com/2.0/'
class LastImportPlugin(plugins.BeetsPlugin):
@ -110,7 +110,7 @@ class CustomUser(pylast.User):
def import_lastfm(lib, log):
user = config['lastfm']['user'].get(unicode)
user = config['lastfm']['user'].as_str()
per_page = config['lastimport']['per_page'].get(int)
if not user:
@ -192,7 +192,7 @@ def process_tracks(lib, tracks, log):
total_fails = 0
log.info(u'Received {0} tracks in this page, processing...', total)
for num in xrange(0, total):
for num in range(0, total):
song = None
trackid = tracks[num]['mbid'].strip()
artist = tracks[num]['artist'].get('name', '').strip()

View file

@ -19,14 +19,18 @@
from __future__ import absolute_import, division, print_function
import difflib
import errno
import itertools
import json
import struct
import os.path
import re
import requests
import unicodedata
import urllib
from unidecode import unidecode
import warnings
from HTMLParser import HTMLParseError
import six
from six.moves import urllib
try:
from bs4 import SoupStrainer, BeautifulSoup
@ -40,9 +44,18 @@ try:
except ImportError:
HAS_LANGDETECT = False
try:
# PY3: HTMLParseError was removed in 3.5 as strict mode
# was deprecated in 3.3.
# https://docs.python.org/3.3/library/html.parser.html
from six.moves.html_parser import HTMLParseError
except ImportError:
class HTMLParseError(Exception):
pass
from beets import plugins
from beets import ui
import beets
DIV_RE = re.compile(r'<(/?)div>?', re.I)
COMMENT_RE = re.compile(r'<!--.*-->', re.S)
@ -62,20 +75,62 @@ URL_CHARACTERS = {
u'\u2016': u'-',
u'\u2026': u'...',
}
USER_AGENT = 'beets/{}'.format(beets.__version__)
# The content for the base index.rst generated in ReST mode.
REST_INDEX_TEMPLATE = u'''Lyrics
======
* :ref:`Song index <genindex>`
* :ref:`search`
Artist index:
.. toctree::
:maxdepth: 1
:glob:
artists/*
'''
# The content for the base conf.py generated.
REST_CONF_TEMPLATE = u'''# -*- coding: utf-8 -*-
master_doc = 'index'
project = u'Lyrics'
copyright = u'none'
author = u'Various Authors'
latex_documents = [
(master_doc, 'Lyrics.tex', project,
author, 'manual'),
]
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
epub_exclude_files = ['search.html']
epub_tocdepth = 1
epub_tocdup = False
'''
# Utilities.
def unichar(i):
try:
return six.unichr(i)
except ValueError:
return struct.pack('i', i).decode('utf-32')
def unescape(text):
"""Resolve &#xxx; HTML entities (and some others)."""
if isinstance(text, bytes):
text = text.decode('utf8', 'ignore')
text = text.decode('utf-8', 'ignore')
out = text.replace(u'&nbsp;', u' ')
def replchar(m):
num = m.group(1)
return unichr(int(num))
return unichar(int(num))
out = re.sub(u"&#(\d+);", replchar, out)
return out
@ -93,7 +148,6 @@ def extract_text_in(html, starttag):
"""Extract the text from a <DIV> tag in the HTML starting with
``starttag``. Returns None if parsing fails.
"""
# Strip off the leading text before opening tag.
try:
_, html = html.split(starttag, 1)
@ -134,30 +188,33 @@ def search_pairs(item):
and featured artists from the strings and add them as candidates.
The method also tries to split multiple titles separated with `/`.
"""
def generate_alternatives(string, patterns):
"""Generate string alternatives by extracting first matching group for
each given pattern.
"""
alternatives = [string]
for pattern in patterns:
match = re.search(pattern, string, re.IGNORECASE)
if match:
alternatives.append(match.group(1))
return alternatives
title, artist = item.title, item.artist
titles = [title]
artists = [artist]
patterns = [
# Remove any featuring artists from the artists name
pattern = r"(.*?) {0}".format(plugins.feat_tokens())
match = re.search(pattern, artist, re.IGNORECASE)
if match:
artists.append(match.group(1))
r"(.*?) {0}".format(plugins.feat_tokens())]
artists = generate_alternatives(artist, patterns)
patterns = [
# Remove a parenthesized suffix from a title string. Common
# examples include (live), (remix), and (acoustic).
pattern = r"(.+?)\s+[(].*[)]$"
match = re.search(pattern, title, re.IGNORECASE)
if match:
titles.append(match.group(1))
r"(.+?)\s+[(].*[)]$",
# Remove any featuring artists from the title
pattern = r"(.*?) {0}".format(plugins.feat_tokens(for_artist=False))
for title in titles[:]:
match = re.search(pattern, title, re.IGNORECASE)
if match:
titles.append(match.group(1))
r"(.*?) {0}".format(plugins.feat_tokens(for_artist=False)),
# Remove part of title after colon ':' for songs with subtitles
r"(.+?)\s*:.*"]
titles = generate_alternatives(title, patterns)
# Check for a dual song (e.g. Pink Floyd - Speak to Me / Breathe)
# and each of them.
@ -170,6 +227,24 @@ def search_pairs(item):
return itertools.product(artists, multi_titles)
def slug(text):
"""Make a URL-safe, human-readable version of the given text
This will do the following:
1. decode unicode characters into ASCII
2. shift everything to lowercase
3. strip whitespace
4. replace other non-word characters with dashes
5. strip extra dashes
This somewhat duplicates the :func:`Google.slugify` function but
slugify is not as generic as this one, which can be reused
elsewhere.
"""
return re.sub(r'\W+', '-', unidecode(text).lower().strip()).strip('-')
class Backend(object):
def __init__(self, config, log):
self._log = log
@ -177,11 +252,11 @@ class Backend(object):
@staticmethod
def _encode(s):
"""Encode the string for inclusion in a URL"""
if isinstance(s, unicode):
if isinstance(s, six.text_type):
for char, repl in URL_CHARACTERS.items():
s = s.replace(char, repl)
s = s.encode('utf8', 'ignore')
return urllib.quote(s)
s = s.encode('utf-8', 'ignore')
return urllib.parse.quote(s)
def build_url(self, artist, title):
return self.URL_PATTERN % (self._encode(artist.title()),
@ -198,7 +273,9 @@ class Backend(object):
# We're not overly worried about the NSA MITMing our lyrics scraper
with warnings.catch_warnings():
warnings.simplefilter('ignore')
r = requests.get(url, verify=False)
r = requests.get(url, verify=False, headers={
'User-Agent': USER_AGENT,
})
except requests.RequestException as exc:
self._log.debug(u'lyrics request failed: {0}', exc)
return
@ -218,12 +295,12 @@ class SymbolsReplaced(Backend):
'>': 'Greater_Than',
'#': 'Number_',
r'[\[\{]': '(',
r'[\[\{]': ')'
r'[\]\}]': ')',
}
@classmethod
def _encode(cls, s):
for old, new in cls.REPLACEMENTS.iteritems():
for old, new in cls.REPLACEMENTS.items():
s = re.sub(old, new, s)
return super(SymbolsReplaced, cls)._encode(s)
@ -238,104 +315,97 @@ class MusiXmatch(SymbolsReplaced):
def fetch(self, artist, title):
url = self.build_url(artist, title)
html = self.fetch_url(url)
if not html:
return
lyrics = extract_text_between(html,
'"body":', '"language":')
return lyrics.strip(',"').replace('\\n', '\n')
if "We detected that your IP is blocked" in html:
self._log.warning(u'we are blocked at MusixMatch: url %s failed'
% url)
return
html_part = html.split('<p class="mxm-lyrics__content')[-1]
lyrics = extract_text_between(html_part, '>', '</p>')
lyrics = lyrics.strip(',"').replace('\\n', '\n')
# another odd case: sometimes only that string remains, for
# missing songs. this seems to happen after being blocked
# above, when filling in the CAPTCHA.
if "Instant lyrics for all your music." in lyrics:
return
return lyrics
class Genius(Backend):
"""Fetch lyrics from Genius via genius-api."""
"""Fetch lyrics from Genius via genius-api.
Simply adapted from
bigishdata.com/2016/09/27/getting-song-lyrics-from-geniuss-api-scraping/
"""
base_url = "https://api.genius.com"
def __init__(self, config, log):
super(Genius, self).__init__(config, log)
self.api_key = config['genius_api_key'].get(unicode)
self.headers = {'Authorization': "Bearer %s" % self.api_key}
self.api_key = config['genius_api_key'].as_str()
self.headers = {
'Authorization': "Bearer %s" % self.api_key,
'User-Agent': USER_AGENT,
}
def search_genius(self, artist, title):
query = u"%s %s" % (artist, title)
url = u'https://api.genius.com/search?q=%s' \
% (urllib.quote(query.encode('utf8')))
def lyrics_from_song_api_path(self, song_api_path):
song_url = self.base_url + song_api_path
response = requests.get(song_url, headers=self.headers)
json = response.json()
path = json["response"]["song"]["path"]
self._log.debug(u'genius: requesting search {}', url)
# Gotta go regular html scraping... come on Genius.
page_url = "https://genius.com" + path
try:
req = requests.get(
url,
headers=self.headers,
allow_redirects=True
)
req.raise_for_status()
page = requests.get(page_url)
except requests.RequestException as exc:
self._log.debug(u'genius: request error: {}', exc)
self._log.debug(u'Genius page request for {0} failed: {1}',
page_url, exc)
return None
html = BeautifulSoup(page.text, "html.parser")
try:
return req.json()
except ValueError:
self._log.debug(u'genius: invalid response: {}', req.text)
return None
# Remove script tags that they put in the middle of the lyrics.
[h.extract() for h in html('script')]
def get_lyrics(self, link):
url = u'http://genius-api.com/api/lyricsInfo'
self._log.debug(u'genius: requesting lyrics for link {}', link)
try:
req = requests.post(
url,
data={'link': link},
headers=self.headers,
allow_redirects=True
)
req.raise_for_status()
except requests.RequestException as exc:
self._log.debug(u'genius: request error: {}', exc)
return None
try:
return req.json()
except ValueError:
self._log.debug(u'genius: invalid response: {}', req.text)
return None
def build_lyric_string(self, lyrics):
if 'lyrics' not in lyrics:
return
sections = lyrics['lyrics']['sections']
lyrics_list = []
for section in sections:
lyrics_list.append(section['name'])
lyrics_list.append('\n')
for verse in section['verses']:
if 'content' in verse:
lyrics_list.append(verse['content'])
return ''.join(lyrics_list)
def fetch(self, artist, title):
search_data = self.search_genius(artist, title)
if not search_data:
return
if not search_data['meta']['status'] == 200:
return
else:
records = search_data['response']['hits']
if not records:
return
record_url = records[0]['result']['url']
lyric_data = self.get_lyrics(record_url)
if not lyric_data:
return
lyrics = self.build_lyric_string(lyric_data)
# At least Genius is nice and has a tag called 'lyrics'!
# Updated css where the lyrics are based in HTML.
lyrics = html.find("div", class_="lyrics").get_text()
return lyrics
def fetch(self, artist, title):
search_url = self.base_url + "/search"
data = {'q': title}
try:
response = requests.get(search_url, data=data,
headers=self.headers)
except requests.RequestException as exc:
self._log.debug(u'Genius API request failed: {0}', exc)
return None
try:
json = response.json()
except ValueError:
self._log.debug(u'Genius API request returned invalid JSON')
return None
song_info = None
for hit in json["response"]["hits"]:
if hit["result"]["primary_artist"]["name"] == artist:
song_info = hit
break
if song_info:
song_api_path = song_info["result"]["api_path"]
return self.lyrics_from_song_api_path(song_api_path)
class LyricsWiki(SymbolsReplaced):
"""Fetch lyrics from LyricsWiki."""
URL_PATTERN = 'http://lyrics.wikia.com/%s:%s'
def fetch(self, artist, title):
@ -354,38 +424,6 @@ class LyricsWiki(SymbolsReplaced):
return lyrics
class LyricsCom(Backend):
"""Fetch lyrics from Lyrics.com."""
URL_PATTERN = 'http://www.lyrics.com/%s-lyrics-%s.html'
NOT_FOUND = (
'Sorry, we do not have the lyric',
'Submit Lyrics',
)
@classmethod
def _encode(cls, s):
s = re.sub(r'[^\w\s-]', '', s)
s = re.sub(r'\s+', '-', s)
return super(LyricsCom, cls)._encode(s).lower()
def fetch(self, artist, title):
url = self.build_url(artist, title)
html = self.fetch_url(url)
if not html:
return
lyrics = extract_text_between(html, '<div id="lyrics" class="SCREENO'
'NLY" itemprop="description">', '</div>')
if not lyrics:
return
for not_found_str in self.NOT_FOUND:
if not_found_str in lyrics:
return
parts = lyrics.split('\n---\nLyrics powered by', 1)
if parts:
return parts[0]
def remove_credits(text):
"""Remove first/last line of text if it contains the word 'lyrics'
eg 'Lyrics by songsdatabase.com'
@ -459,10 +497,11 @@ def scrape_lyrics_from_html(html):
class Google(Backend):
"""Fetch lyrics from Google search results."""
def __init__(self, config, log):
super(Google, self).__init__(config, log)
self.api_key = config['google_API_key'].get(unicode)
self.engine_id = config['google_engine_ID'].get(unicode)
self.api_key = config['google_API_key'].as_str()
self.engine_id = config['google_engine_ID'].as_str()
def is_lyrics(self, text, artist=None):
"""Determine whether the text seems to be valid lyrics.
@ -503,7 +542,7 @@ class Google(Backend):
try:
text = unicodedata.normalize('NFKD', text).encode('ascii',
'ignore')
text = unicode(re.sub('[-\s]+', ' ', text))
text = six.text_type(re.sub('[-\s]+', ' ', text.decode('utf-8')))
except UnicodeDecodeError:
self._log.exception(u"Failing to normalize '{0}'", text)
return text
@ -542,14 +581,20 @@ class Google(Backend):
query = u"%s %s" % (artist, title)
url = u'https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s' \
% (self.api_key, self.engine_id,
urllib.quote(query.encode('utf8')))
urllib.parse.quote(query.encode('utf-8')))
data = urllib.urlopen(url)
data = json.load(data)
data = self.fetch_url(url)
if not data:
self._log.debug(u'google backend returned no data')
return None
try:
data = json.loads(data)
except ValueError as exc:
self._log.debug(u'google backend returned malformed JSON: {}', exc)
if 'error' in data:
reason = data['error']['errors'][0]['reason']
self._log.debug(u'google lyrics backend error: {0}', reason)
return
self._log.debug(u'google backend error: {0}', reason)
return None
if 'items' in data.keys():
for item in data['items']:
@ -570,11 +615,10 @@ class Google(Backend):
class LyricsPlugin(plugins.BeetsPlugin):
SOURCES = ['google', 'lyricwiki', 'lyrics.com', 'musixmatch']
SOURCES = ['google', 'lyricwiki', 'musixmatch', 'genius']
SOURCE_BACKENDS = {
'google': Google,
'lyricwiki': LyricsWiki,
'lyrics.com': LyricsCom,
'musixmatch': MusiXmatch,
'genius': Genius,
}
@ -594,6 +638,7 @@ class LyricsPlugin(plugins.BeetsPlugin):
"76V-uFL5jks5dNvcGCdarqFjDhP9c",
'fallback': None,
'force': False,
'local': False,
'sources': self.SOURCES,
})
self.config['bing_client_secret'].redact = True
@ -601,28 +646,47 @@ class LyricsPlugin(plugins.BeetsPlugin):
self.config['google_engine_ID'].redact = True
self.config['genius_api_key'].redact = True
# State information for the ReST writer.
# First, the current artist we're writing.
self.artist = u'Unknown artist'
# The current album: False means no album yet.
self.album = False
# The current rest file content. None means the file is not
# open yet.
self.rest = None
available_sources = list(self.SOURCES)
sources = plugins.sanitize_choices(
self.config['sources'].as_str_seq(), available_sources)
if 'google' in sources:
if not self.config['google_API_key'].get():
self._log.warn(u'To use the google lyrics source, you must '
u'provide an API key in the configuration. '
u'See the documentation for further details.')
# We log a *debug* message here because the default
# configuration includes `google`. This way, the source
# is silent by default but can be enabled just by
# setting an API key.
self._log.debug(u'Disabling google source: '
u'no API key configured.')
sources.remove('google')
if not HAS_BEAUTIFUL_SOUP:
self._log.warn(u'To use the google lyrics source, you must '
u'install the beautifulsoup4 module. See the '
u'documentation for further details.')
elif not HAS_BEAUTIFUL_SOUP:
self._log.warning(u'To use the google lyrics source, you must '
u'install the beautifulsoup4 module. See '
u'the documentation for further details.')
sources.remove('google')
if 'genius' in sources and not HAS_BEAUTIFUL_SOUP:
self._log.debug(
u'The Genius backend requires BeautifulSoup, which is not '
u'installed, so the source is disabled.'
)
sources.remove('genius')
self.config['bing_lang_from'] = [
x.lower() for x in self.config['bing_lang_from'].as_str_seq()]
self.bing_auth_token = None
if not HAS_LANGDETECT and self.config['bing_client_secret'].get():
self._log.warn(u'To use bing translations, you need to '
self._log.warning(u'To use bing translations, you need to '
u'install the langdetect module. See the '
u'documentation for further details.')
@ -633,14 +697,14 @@ class LyricsPlugin(plugins.BeetsPlugin):
params = {
'client_id': 'beets',
'client_secret': self.config['bing_client_secret'],
'scope': 'http://api.microsofttranslator.com',
'scope': "https://api.microsofttranslator.com",
'grant_type': 'client_credentials',
}
oauth_url = 'https://datamarket.accesscontrol.windows.net/v2/OAuth2-13'
oauth_token = json.loads(requests.post(
oauth_url,
data=urllib.urlencode(params)).content)
data=urllib.parse.urlencode(params)).content)
if 'access_token' in oauth_token:
return "Bearer " + oauth_token['access_token']
else:
@ -654,27 +718,106 @@ class LyricsPlugin(plugins.BeetsPlugin):
action='store_true', default=False,
help=u'print lyrics to console',
)
cmd.parser.add_option(
u'-r', u'--write-rest', dest='writerest',
action='store', default=None, metavar='dir',
help=u'write lyrics to given directory as ReST files',
)
cmd.parser.add_option(
u'-f', u'--force', dest='force_refetch',
action='store_true', default=False,
help=u'always re-download lyrics',
)
cmd.parser.add_option(
u'-l', u'--local', dest='local_only',
action='store_true', default=False,
help=u'do not fetch missing lyrics',
)
def func(lib, opts, args):
# The "write to files" option corresponds to the
# import_write config value.
write = ui.should_write()
if opts.writerest:
self.writerest_indexes(opts.writerest)
for item in lib.items(ui.decargs(args)):
if not opts.local_only and not self.config['local']:
self.fetch_item_lyrics(
lib, item, write,
opts.force_refetch or self.config['force'],
)
if opts.printlyr and item.lyrics:
if item.lyrics:
if opts.printlyr:
ui.print_(item.lyrics)
if opts.writerest:
self.writerest(opts.writerest, item)
if opts.writerest:
# flush last artist
self.writerest(opts.writerest, None)
ui.print_(u'ReST files generated. to build, use one of:')
ui.print_(u' sphinx-build -b html %s _build/html'
% opts.writerest)
ui.print_(u' sphinx-build -b epub %s _build/epub'
% opts.writerest)
ui.print_((u' sphinx-build -b latex %s _build/latex '
u'&& make -C _build/latex all-pdf')
% opts.writerest)
cmd.func = func
return [cmd]
def writerest(self, directory, item):
"""Write the item to an ReST file
This will keep state (in the `rest` variable) in order to avoid
writing continuously to the same files.
"""
if item is None or slug(self.artist) != slug(item.albumartist):
if self.rest is not None:
path = os.path.join(directory, 'artists',
slug(self.artist) + u'.rst')
with open(path, 'wb') as output:
output.write(self.rest.encode('utf-8'))
self.rest = None
if item is None:
return
self.artist = item.albumartist.strip()
self.rest = u"%s\n%s\n\n.. contents::\n :local:\n\n" \
% (self.artist,
u'=' * len(self.artist))
if self.album != item.album:
tmpalbum = self.album = item.album.strip()
if self.album == '':
tmpalbum = u'Unknown album'
self.rest += u"%s\n%s\n\n" % (tmpalbum, u'-' * len(tmpalbum))
title_str = u":index:`%s`" % item.title.strip()
block = u'| ' + item.lyrics.replace(u'\n', u'\n| ')
self.rest += u"%s\n%s\n\n%s\n\n" % (title_str,
u'~' * len(title_str),
block)
def writerest_indexes(self, directory):
"""Write conf.py and index.rst files necessary for Sphinx
We write minimal configurations that are necessary for Sphinx
to operate. We do not overwrite existing files so that
customizations are respected."""
try:
os.makedirs(os.path.join(directory, 'artists'))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
indexfile = os.path.join(directory, 'index.rst')
if not os.path.exists(indexfile):
with open(indexfile, 'w') as output:
output.write(REST_INDEX_TEMPLATE)
conffile = os.path.join(directory, 'conf.py')
if not os.path.exists(conffile):
with open(conffile, 'w') as output:
output.write(REST_CONF_TEMPLATE)
def imported(self, session, task):
"""Import hook for fetching lyrics automatically.
"""
@ -685,7 +828,8 @@ class LyricsPlugin(plugins.BeetsPlugin):
def fetch_item_lyrics(self, lib, item, write, force):
"""Fetch and store lyrics for a single item. If ``write``, then the
lyrics will also be written to the file itself."""
lyrics will also be written to the file itself.
"""
# Skip if the item already has lyrics.
if not force and item.lyrics:
self._log.info(u'lyrics already present: {0}', item)
@ -740,7 +884,7 @@ class LyricsPlugin(plugins.BeetsPlugin):
if self.bing_auth_token:
# Extract unique lines to limit API request size per song
text_lines = set(text.split('\n'))
url = ('http://api.microsofttranslator.com/v2/Http.svc/'
url = ('https://api.microsofttranslator.com/v2/Http.svc/'
'Translate?text=%s&to=%s' % ('|'.join(text_lines), to_lang))
r = requests.get(url,
headers={"Authorization ": self.bing_auth_token})
@ -751,7 +895,7 @@ class LyricsPlugin(plugins.BeetsPlugin):
self.bing_auth_token = None
return self.append_translation(text, to_lang)
return text
lines_translated = ET.fromstring(r.text.encode('utf8')).text
lines_translated = ET.fromstring(r.text.encode('utf-8')).text
# Use a translation mapping dict to build resulting lyrics
translations = dict(zip(text_lines, lines_translated.split('|')))
result = ''

View file

@ -1,17 +1,17 @@
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Jeffrey Aylesworth <jeffrey@aylesworth.ca>
# This file is part of beets.
# Copyright (c) 2011, Jeffrey Aylesworth <mail@jeffrey.red>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
@ -24,6 +24,7 @@ import musicbrainzngs
import re
SUBMISSION_CHUNK_SIZE = 200
FETCH_CHUNK_SIZE = 100
UUID_REGEX = r'^[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}$'
@ -57,44 +58,93 @@ class MusicBrainzCollectionPlugin(BeetsPlugin):
super(MusicBrainzCollectionPlugin, self).__init__()
config['musicbrainz']['pass'].redact = True
musicbrainzngs.auth(
config['musicbrainz']['user'].get(unicode),
config['musicbrainz']['pass'].get(unicode),
config['musicbrainz']['user'].as_str(),
config['musicbrainz']['pass'].as_str(),
)
self.config.add({'auto': False})
self.config.add({
'auto': False,
'collection': u'',
'remove': False,
})
if self.config['auto']:
self.import_stages = [self.imported]
def _get_collection(self):
collections = mb_call(musicbrainzngs.get_collections)
if not collections['collection-list']:
raise ui.UserError(u'no collections exist for user')
# Get all collection IDs, avoiding event collections
collection_ids = [x['id'] for x in collections['collection-list']]
if not collection_ids:
raise ui.UserError(u'No collection found.')
# Check that the collection exists so we can present a nice error
collection = self.config['collection'].as_str()
if collection:
if collection not in collection_ids:
raise ui.UserError(u'invalid collection ID: {}'
.format(collection))
return collection
# No specified collection. Just return the first collection ID
return collection_ids[0]
def _get_albums_in_collection(self, id):
def _fetch(offset):
res = mb_call(
musicbrainzngs.get_releases_in_collection,
id,
limit=FETCH_CHUNK_SIZE,
offset=offset
)['collection']
return [x['id'] for x in res['release-list']], res['release-count']
offset = 0
albums_in_collection, release_count = _fetch(offset)
for i in range(0, release_count, FETCH_CHUNK_SIZE):
albums_in_collection += _fetch(offset)[0]
offset += FETCH_CHUNK_SIZE
return albums_in_collection
def commands(self):
mbupdate = Subcommand('mbupdate',
help=u'Update MusicBrainz collection')
mbupdate.parser.add_option('-r', '--remove',
action='store_true',
default=None,
dest='remove',
help='Remove albums not in beets library')
mbupdate.func = self.update_collection
return [mbupdate]
def remove_missing(self, collection_id, lib_albums):
lib_ids = set([x.mb_albumid for x in lib_albums])
albums_in_collection = self._get_albums_in_collection(collection_id)
remove_me = list(set(albums_in_collection) - lib_ids)
for i in range(0, len(remove_me), FETCH_CHUNK_SIZE):
chunk = remove_me[i:i + FETCH_CHUNK_SIZE]
mb_call(
musicbrainzngs.remove_releases_from_collection,
collection_id, chunk
)
def update_collection(self, lib, opts, args):
self.update_album_list(lib.albums())
self.config.set_args(opts)
remove_missing = self.config['remove'].get(bool)
self.update_album_list(lib, lib.albums(), remove_missing)
def imported(self, session, task):
"""Add each imported album to the collection.
"""
if task.is_album:
self.update_album_list([task.album])
self.update_album_list(session.lib, [task.album])
def update_album_list(self, album_list):
"""Update the MusicBrainz colleciton from a list of Beets albums
def update_album_list(self, lib, album_list, remove_missing=False):
"""Update the MusicBrainz collection from a list of Beets albums
"""
# Get the available collections.
collections = mb_call(musicbrainzngs.get_collections)
if not collections['collection-list']:
raise ui.UserError(u'no collections exist for user')
# Get the first release collection. MusicBrainz also has event
# collections, so we need to avoid adding to those.
for collection in collections['collection-list']:
if 'release-count' in collection:
collection_id = collection['id']
break
else:
raise ui.UserError(u'No collection found.')
collection_id = self._get_collection()
# Get a list of all the album IDs.
album_ids = []
@ -111,4 +161,6 @@ class MusicBrainzCollectionPlugin(BeetsPlugin):
u'Updating MusicBrainz collection {0}...', collection_id
)
submit_albums(collection_id, album_ids)
if remove_missing:
self.remove_missing(collection_id, lib.albums())
self._log.info(u'...MusicBrainz collection updated.')

View file

@ -36,7 +36,7 @@ class MBSubmitPlugin(BeetsPlugin):
super(MBSubmitPlugin, self).__init__()
self.config.add({
'format': '$track. $title - $artist ($length)',
'format': u'$track. $title - $artist ($length)',
'threshold': 'medium',
})
@ -56,5 +56,5 @@ class MBSubmitPlugin(BeetsPlugin):
return [PromptChoice(u'p', u'Print tracks', self.print_tracks)]
def print_tracks(self, session, task):
for i in task.items:
print_data(None, i, self.config['format'].get())
for i in sorted(task.items, key=lambda i: i.track):
print_data(None, i, self.config['format'].as_str())

View file

@ -117,23 +117,30 @@ class MBSyncPlugin(BeetsPlugin):
album_formatted)
continue
# Map recording MBIDs to their information. Recordings can appear
# multiple times on a release, so each MBID maps to a list of
# TrackInfo objects.
# Map release track and recording MBIDs to their information.
# Recordings can appear multiple times on a release, so each MBID
# maps to a list of TrackInfo objects.
releasetrack_index = dict()
track_index = defaultdict(list)
for track_info in album_info.tracks:
releasetrack_index[track_info.release_track_id] = track_info
track_index[track_info.track_id].append(track_info)
# Construct a track mapping according to MBIDs. This should work
# for albums that have missing or extra tracks. If there are
# multiple copies of a recording, they are disambiguated using
# their disc and track number.
# Construct a track mapping according to MBIDs (release track MBIDs
# first, if available, and recording MBIDs otherwise). This should
# work for albums that have missing or extra tracks.
mapping = {}
for item in items:
if item.mb_releasetrackid and \
item.mb_releasetrackid in releasetrack_index:
mapping[item] = releasetrack_index[item.mb_releasetrackid]
else:
candidates = track_index[item.mb_trackid]
if len(candidates) == 1:
mapping[item] = candidates[0]
else:
# If there are multiple copies of a recording, they are
# disambiguated using their disc and track number.
for c in candidates:
if (c.medium_index == item.track and
c.medium == item.disc):
@ -145,10 +152,13 @@ class MBSyncPlugin(BeetsPlugin):
with lib.transaction():
autotag.apply_metadata(album_info, mapping)
changed = False
# Find any changed item to apply MusicBrainz changes to album.
any_changed_item = items[0]
for item in items:
item_changed = ui.show_model_changes(item)
changed |= item_changed
if item_changed:
any_changed_item = item
apply_item_changes(lib, item, move, pretend, write)
if not changed:
@ -158,7 +168,7 @@ class MBSyncPlugin(BeetsPlugin):
if not pretend:
# Update album structure to reflect an item in it.
for key in library.Album.item_keys:
a[key] = items[0][key]
a[key] = any_changed_item[key]
a.store()
# Move album art (and any inconsistent items).

View file

@ -24,6 +24,7 @@ from importlib import import_module
from beets.util.confit import ConfigValueError
from beets import ui
from beets.plugins import BeetsPlugin
import six
METASYNC_MODULE = 'beetsplug.metasync'
@ -35,9 +36,7 @@ SOURCES = {
}
class MetaSource(object):
__metaclass__ = ABCMeta
class MetaSource(six.with_metaclass(ABCMeta, object)):
def __init__(self, config, log):
self.item_types = {}
self.config = config

View file

@ -21,7 +21,7 @@ from __future__ import division, absolute_import, print_function
from os.path import basename
from datetime import datetime
from time import mktime
from xml.sax.saxutils import escape
from xml.sax.saxutils import quoteattr
from beets.util import displayable_path
from beets.dbcore import types
@ -51,7 +51,7 @@ class Amarok(MetaSource):
queryXML = u'<query version="1.0"> \
<filters> \
<and><include field="filename" value="%s" /></and> \
<and><include field="filename" value=%s /></and> \
</filters> \
</query>'
@ -71,7 +71,9 @@ class Amarok(MetaSource):
# for the patch relative to the mount point. But the full path is part
# of the result set. So query for the filename and then try to match
# the correct item from the results we get back
results = self.collection.Query(self.queryXML % escape(basename(path)))
results = self.collection.Query(
self.queryXML % quoteattr(basename(path))
)
for result in results:
if result['xesam:url'] != path:
continue

View file

@ -23,8 +23,8 @@ import os
import shutil
import tempfile
import plistlib
import urllib
from urlparse import urlparse
from six.moves.urllib.parse import urlparse, unquote
from time import mktime
from beets import util
@ -57,7 +57,7 @@ def _norm_itunes_path(path):
# E.g., '\\G:\\Music\\bar' needs to be stripped to 'G:\\Music\\bar'
return util.bytestring_path(os.path.normpath(
urllib.unquote(urlparse(path).path)).lstrip('\\')).lower()
unquote(urlparse(path).path)).lstrip('\\')).lower()
class Itunes(MetaSource):

View file

@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Pedro Silva.
# Copyright 2017, Quentin Young.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
@ -17,11 +18,16 @@
"""
from __future__ import division, absolute_import, print_function
import musicbrainzngs
from musicbrainzngs.musicbrainz import MusicBrainzError
from collections import defaultdict
from beets.autotag import hooks
from beets.library import Item
from beets.plugins import BeetsPlugin
from beets.ui import decargs, print_, Subcommand
from beets import config
from beets.dbcore import types
def _missing_count(album):
@ -81,12 +87,18 @@ def _item(track_info, album_info, album_id):
class MissingPlugin(BeetsPlugin):
"""List missing tracks
"""
album_types = {
'missing': types.INTEGER,
}
def __init__(self):
super(MissingPlugin, self).__init__()
self.config.add({
'count': False,
'total': False,
'album': False,
})
self.album_template_fields['missing'] = _missing_count
@ -100,16 +112,32 @@ class MissingPlugin(BeetsPlugin):
self._command.parser.add_option(
u'-t', u'--total', dest='total', action='store_true',
help=u'count total of missing tracks')
self._command.parser.add_option(
u'-a', u'--album', dest='album', action='store_true',
help=u'show missing albums for artist instead of tracks')
self._command.parser.add_format_option()
def commands(self):
def _miss(lib, opts, args):
self.config.set_args(opts)
albms = self.config['album'].get()
helper = self._missing_albums if albms else self._missing_tracks
helper(lib, decargs(args))
self._command.func = _miss
return [self._command]
def _missing_tracks(self, lib, query):
"""Print a listing of tracks missing from each album in the library
matching query.
"""
albums = lib.albums(query)
count = self.config['count'].get()
total = self.config['total'].get()
fmt = config['format_album' if count else 'format_item'].get()
albums = lib.albums(decargs(args))
if total:
print(sum([_missing_count(a) for a in albums]))
return
@ -127,13 +155,67 @@ class MissingPlugin(BeetsPlugin):
for item in self._missing(album):
print_(format(item, fmt))
self._command.func = _miss
return [self._command]
def _missing_albums(self, lib, query):
"""Print a listing of albums missing from each artist in the library
matching query.
"""
total = self.config['total'].get()
albums = lib.albums(query)
# build dict mapping artist to list of their albums in library
albums_by_artist = defaultdict(list)
for alb in albums:
artist = (alb['albumartist'], alb['mb_albumartistid'])
albums_by_artist[artist].append(alb)
total_missing = 0
# build dict mapping artist to list of all albums
for artist, albums in albums_by_artist.items():
if artist[1] is None or artist[1] == "":
albs_no_mbid = [u"'" + a['album'] + u"'" for a in albums]
self._log.info(
u"No musicbrainz ID for artist '{}' found in album(s) {}; "
"skipping", artist[0], u", ".join(albs_no_mbid)
)
continue
try:
resp = musicbrainzngs.browse_release_groups(artist=artist[1])
release_groups = resp['release-group-list']
except MusicBrainzError as err:
self._log.info(
u"Couldn't fetch info for artist '{}' ({}) - '{}'",
artist[0], artist[1], err
)
continue
missing = []
present = []
for rg in release_groups:
missing.append(rg)
for alb in albums:
if alb['mb_releasegroupid'] == rg['id']:
missing.remove(rg)
present.append(rg)
break
total_missing += len(missing)
if total:
continue
missing_titles = {rg['title'] for rg in missing}
for release_title in missing_titles:
print_(u"{} - {}".format(artist[0], release_title))
if total:
print(total_missing)
def _missing(self, album):
"""Query MusicBrainz to determine items missing from `album`.
"""
item_mbids = map(lambda x: x.mb_trackid, album.items())
item_mbids = [x.mb_trackid for x in album.items()]
if len([i for i in album.items()]) < album.albumtotal:
# fetch missing items
# TODO: Implement caching that without breaking other stuff

View file

@ -40,36 +40,24 @@ mpd_config = config['mpd']
def is_url(path):
"""Try to determine if the path is an URL.
"""
if isinstance(path, bytes): # if it's bytes, then it's a path
return False
return path.split('://', 1)[0] in ['http', 'https']
# Use the MPDClient internals to get unicode.
# see http://www.tarmack.eu/code/mpdunicode.py for the general idea
class MPDClient(mpd.MPDClient):
def _write_command(self, command, args=[]):
args = [unicode(arg).encode('utf-8') for arg in args]
super(MPDClient, self)._write_command(command, args)
def _read_line(self):
line = super(MPDClient, self)._read_line()
if line is not None:
return line.decode('utf-8')
return None
class MPDClientWrapper(object):
def __init__(self, log):
self._log = log
self.music_directory = (
mpd_config['music_directory'].get(unicode))
mpd_config['music_directory'].as_str())
self.client = MPDClient()
self.client = mpd.MPDClient(use_unicode=True)
def connect(self):
"""Connect to the MPD.
"""
host = mpd_config['host'].get(unicode)
host = mpd_config['host'].as_str()
port = mpd_config['port'].get(int)
if host[0] in ['/', '~']:
@ -81,7 +69,7 @@ class MPDClientWrapper(object):
except socket.error as e:
raise ui.UserError(u'could not connect to MPD: {0}'.format(e))
password = mpd_config['password'].get(unicode)
password = mpd_config['password'].as_str()
if password:
try:
self.client.password(password)
@ -268,21 +256,30 @@ class MPDStats(object):
if not path:
return
if is_url(path):
self._log.info(u'playing stream {0}', displayable_path(path))
return
played, duration = map(int, status['time'].split(':', 1))
remaining = duration - played
if self.now_playing and self.now_playing['path'] != path:
skipped = self.handle_song_change(self.now_playing)
# mpd responds twice on a natural new song start
going_to_happen_twice = not skipped
if self.now_playing:
if self.now_playing['path'] != path:
self.handle_song_change(self.now_playing)
else:
going_to_happen_twice = False
# In case we got mpd play event with same song playing
# multiple times,
# assume low diff means redundant second play event
# after natural song start.
diff = abs(time.time() - self.now_playing['started'])
if diff <= self.time_threshold:
return
if self.now_playing['path'] == path and played == 0:
self.handle_song_change(self.now_playing)
if is_url(path):
self._log.info(u'playing stream {0}', displayable_path(path))
self.now_playing = None
return
if not going_to_happen_twice:
self._log.info(u'playing {0}', displayable_path(path))
self.now_playing = {
@ -328,7 +325,7 @@ class MPDStatsPlugin(plugins.BeetsPlugin):
'music_directory': config['directory'].as_filename(),
'rating': True,
'rating_mix': 0.75,
'host': u'localhost',
'host': os.environ.get('MPD_HOST', u'localhost'),
'port': 6600,
'password': u'',
})
@ -353,11 +350,11 @@ class MPDStatsPlugin(plugins.BeetsPlugin):
# Overrides for MPD settings.
if opts.host:
mpd_config['host'] = opts.host.decode('utf8')
mpd_config['host'] = opts.host.decode('utf-8')
if opts.port:
mpd_config['host'] = int(opts.port)
if opts.password:
mpd_config['password'] = opts.password.decode('utf8')
mpd_config['password'] = opts.password.decode('utf-8')
try:
MPDStats(lib, self._log).run()

View file

@ -27,6 +27,7 @@ from beets.plugins import BeetsPlugin
import os
import socket
from beets import config
import six
# No need to introduce a dependency on an MPD library for such a
@ -34,14 +35,14 @@ from beets import config
# easier.
class BufferedSocket(object):
"""Socket abstraction that allows reading by line."""
def __init__(self, host, port, sep='\n'):
def __init__(self, host, port, sep=b'\n'):
if host[0] in ['/', '~']:
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.sock.connect(os.path.expanduser(host))
else:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((host, port))
self.buf = ''
self.buf = b''
self.sep = sep
def readline(self):
@ -50,11 +51,11 @@ class BufferedSocket(object):
if not data:
break
self.buf += data
if '\n' in self.buf:
if self.sep in self.buf:
res, self.buf = self.buf.split(self.sep, 1)
return res + self.sep
else:
return ''
return b''
def send(self, data):
self.sock.send(data)
@ -67,7 +68,7 @@ class MPDUpdatePlugin(BeetsPlugin):
def __init__(self):
super(MPDUpdatePlugin, self).__init__()
config['mpd'].add({
'host': u'localhost',
'host': os.environ.get('MPD_HOST', u'localhost'),
'port': 6600,
'password': u'',
})
@ -86,9 +87,9 @@ class MPDUpdatePlugin(BeetsPlugin):
def update(self, lib):
self.update_mpd(
config['mpd']['host'].get(unicode),
config['mpd']['host'].as_str(),
config['mpd']['port'].get(int),
config['mpd']['password'].get(unicode),
config['mpd']['password'].as_str(),
)
def update_mpd(self, host='localhost', port=6600, password=None):
@ -101,28 +102,28 @@ class MPDUpdatePlugin(BeetsPlugin):
s = BufferedSocket(host, port)
except socket.error as e:
self._log.warning(u'MPD connection failed: {0}',
unicode(e.strerror))
six.text_type(e.strerror))
return
resp = s.readline()
if 'OK MPD' not in resp:
if b'OK MPD' not in resp:
self._log.warning(u'MPD connection failed: {0!r}', resp)
return
if password:
s.send('password "%s"\n' % password)
s.send(b'password "%s"\n' % password.encode('utf8'))
resp = s.readline()
if 'OK' not in resp:
if b'OK' not in resp:
self._log.warning(u'Authentication failed: {0!r}', resp)
s.send('close\n')
s.send(b'close\n')
s.close()
return
s.send('update\n')
s.send(b'update\n')
resp = s.readline()
if 'updating_db' not in resp:
if b'updating_db' not in resp:
self._log.warning(u'Update failed: {0!r}', resp)
s.send('close\n')
s.send(b'close\n')
s.close()
self._log.info(u'Database updated.')

View file

@ -13,24 +13,43 @@ import os
from beets import config, util
from beets.plugins import BeetsPlugin
from beets.util import ancestry
import six
def convert_perm(perm):
"""If the perm is a int it will first convert it to a string and back
to an oct int. Else it just converts it to oct.
"""Convert a string to an integer, interpreting the text as octal.
Or, if `perm` is an integer, reinterpret it as an octal number that
has been "misinterpreted" as decimal.
"""
if isinstance(perm, int):
return int(bytes(perm), 8)
else:
if isinstance(perm, six.integer_types):
perm = six.text_type(perm)
return int(perm, 8)
def check_permissions(path, permission):
"""Checks the permissions of a path.
"""Check whether the file's permissions equal the given vector.
Return a boolean.
"""
return oct(os.stat(path).st_mode & 0o777) == oct(permission)
def assert_permissions(path, permission, log):
"""Check whether the file's permissions are as expected, otherwise,
log a warning message. Return a boolean indicating the match, like
`check_permissions`.
"""
if not check_permissions(util.syspath(path), permission):
log.warning(
u'could not set permissions on {}',
util.displayable_path(path),
)
log.debug(
u'set permissions to {}, but permissions are now {}',
permission,
os.stat(util.syspath(path)).st_mode & 0o777,
)
def dirs_in_library(library, item):
"""Creates a list of ancestor directories in the beets library path.
"""
@ -45,22 +64,22 @@ class Permissions(BeetsPlugin):
# Adding defaults.
self.config.add({
u'file': 644,
u'dir': 755
u'file': '644',
u'dir': '755',
})
self.register_listener('item_imported', permissions)
self.register_listener('album_imported', permissions)
self.register_listener('item_imported', self.fix)
self.register_listener('album_imported', self.fix)
def permissions(lib, item=None, album=None):
"""Running the permission fixer.
def fix(self, lib, item=None, album=None):
"""Fix the permissions for an imported Item or Album.
"""
# Getting the config.
# Get the configured permissions. The user can specify this either a
# string (in YAML quotes) or, for convenience, as an integer so the
# quotes can be omitted. In the latter case, we need to reinterpret the
# integer as octal, not decimal.
file_perm = config['permissions']['file'].get()
dir_perm = config['permissions']['dir'].get()
# Converts permissions to oct.
file_perm = convert_perm(file_perm)
dir_perm = convert_perm(dir_perm)
@ -77,13 +96,14 @@ def permissions(lib, item=None, album=None):
for path in file_chmod_queue:
# Changing permissions on the destination file.
os.chmod(util.bytestring_path(path), file_perm)
self._log.debug(
u'setting file permissions on {}',
util.displayable_path(path),
)
os.chmod(util.syspath(path), file_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), file_perm):
message = u'There was a problem setting permission on {}'.format(
path)
print(message)
assert_permissions(path, file_perm, self._log)
# Adding directories to the directory chmod queue.
dir_chmod_queue.update(
@ -93,10 +113,11 @@ def permissions(lib, item=None, album=None):
# Change permissions for the directories.
for path in dir_chmod_queue:
# Chaning permissions on the destination directory.
os.chmod(util.bytestring_path(path), dir_perm)
self._log.debug(
u'setting directory permissions on {}',
util.displayable_path(path),
)
os.chmod(util.syspath(path), dir_perm)
# Checks if the destination path has the permissions configured.
if not check_permissions(util.bytestring_path(path), dir_perm):
message = u'There was a problem setting permission on {}'.format(
path)
print(message)
assert_permissions(path, dir_perm, self._log)

View file

@ -19,17 +19,41 @@ from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
from beets.ui.commands import PromptChoice
from beets import config
from beets import ui
from beets import util
from os.path import relpath
from tempfile import NamedTemporaryFile
import subprocess
# Indicate where arguments should be inserted into the command string.
# If this is missing, they're placed at the end.
ARGS_MARKER = '$args'
def play(command_str, selection, paths, open_args, log, item_type='track',
keep_open=False):
"""Play items in paths with command_str and optional arguments. If
keep_open, return to beets, otherwise exit once command runs.
"""
# Print number of tracks or albums to be played, log command to be run.
item_type += 's' if len(selection) > 1 else ''
ui.print_(u'Playing {0} {1}.'.format(len(selection), item_type))
log.debug(u'executing command: {} {!r}', command_str, open_args)
try:
if keep_open:
command = util.shlex_split(command_str)
command = command + open_args
subprocess.call(command)
else:
util.interactive_open(open_args, command_str)
except OSError as exc:
raise ui.UserError(
"Could not play the query: {0}".format(exc))
class PlayPlugin(BeetsPlugin):
def __init__(self):
@ -40,11 +64,12 @@ class PlayPlugin(BeetsPlugin):
'use_folders': False,
'relative_to': None,
'raw': False,
# Backwards compatibility. See #1803 and line 74
'warning_threshold': -2,
'warning_treshold': 100,
'warning_threshold': 100,
})
self.register_listener('before_choose_candidate',
self.before_choose_candidate_listener)
def commands(self):
play_command = Subcommand(
'play',
@ -56,41 +81,22 @@ class PlayPlugin(BeetsPlugin):
action='store',
help=u'add additional arguments to the command',
)
play_command.func = self.play_music
play_command.parser.add_option(
u'-y', u'--yes',
action="store_true",
help=u'skip the warning threshold',
)
play_command.func = self._play_command
return [play_command]
def play_music(self, lib, opts, args):
"""Execute query, create temporary playlist and execute player
command passing that playlist, at request insert optional arguments.
def _play_command(self, lib, opts, args):
"""The CLI command function for `beet play`. Create a list of paths
from query, determine if tracks or albums are to be played.
"""
command_str = config['play']['command'].get()
if not command_str:
command_str = util.open_anything()
use_folders = config['play']['use_folders'].get(bool)
relative_to = config['play']['relative_to'].get()
raw = config['play']['raw'].get(bool)
warning_threshold = config['play']['warning_threshold'].get(int)
# We use -2 as a default value for warning_threshold to detect if it is
# set or not. We can't use a falsey value because it would have an
# actual meaning in the configuration of this plugin, and we do not use
# -1 because some people might use it as a value to obtain no warning,
# which wouldn't be that bad of a practice.
if warning_threshold == -2:
# if warning_threshold has not been set by user, look for
# warning_treshold, to preserve backwards compatibility. See #1803.
# warning_treshold has the correct default value of 100.
warning_threshold = config['play']['warning_treshold'].get(int)
if relative_to:
relative_to = util.normpath(relative_to)
# Add optional arguments to the player command.
if opts.args:
if ARGS_MARKER in command_str:
command_str = command_str.replace(ARGS_MARKER, opts.args)
else:
command_str = u"{} {}".format(command_str, opts.args)
# Perform search by album and add folders rather than tracks to
# playlist.
if opts.album:
@ -110,46 +116,95 @@ class PlayPlugin(BeetsPlugin):
else:
selection = lib.items(ui.decargs(args))
paths = [item.path for item in selection]
if relative_to:
paths = [relpath(path, relative_to) for path in paths]
item_type = 'track'
item_type += 's' if len(selection) > 1 else ''
if relative_to:
paths = [relpath(path, relative_to) for path in paths]
if not selection:
ui.print_(ui.colorize('text_warning',
u'No {0} to play.'.format(item_type)))
return
open_args = self._playlist_or_paths(paths)
command_str = self._command_str(opts.args)
# Check if the selection exceeds configured threshold. If True,
# cancel, otherwise proceed with play command.
if opts.yes or not self._exceeds_threshold(
selection, command_str, open_args, item_type):
play(command_str, selection, paths, open_args, self._log,
item_type)
def _command_str(self, args=None):
"""Create a command string from the config command and optional args.
"""
command_str = config['play']['command'].get()
if not command_str:
return util.open_anything()
# Add optional arguments to the player command.
if args:
if ARGS_MARKER in command_str:
return command_str.replace(ARGS_MARKER, args)
else:
return u"{} {}".format(command_str, args)
else:
# Don't include the marker in the command.
return command_str.replace(" " + ARGS_MARKER, "")
def _playlist_or_paths(self, paths):
"""Return either the raw paths of items or a playlist of the items.
"""
if config['play']['raw']:
return paths
else:
return [self._create_tmp_playlist(paths)]
def _exceeds_threshold(self, selection, command_str, open_args,
item_type='track'):
"""Prompt user whether to abort if playlist exceeds threshold. If
True, cancel playback. If False, execute play command.
"""
warning_threshold = config['play']['warning_threshold'].get(int)
# Warn user before playing any huge playlists.
if warning_threshold and len(selection) > warning_threshold:
if len(selection) > 1:
item_type += 's'
ui.print_(ui.colorize(
'text_warning',
u'You are about to queue {0} {1}.'.format(
len(selection), item_type)))
if ui.input_options(('Continue', 'Abort')) == 'a':
return
if ui.input_options((u'Continue', u'Abort')) == 'a':
return True
ui.print_(u'Playing {0} {1}.'.format(len(selection), item_type))
if raw:
open_args = paths
else:
open_args = [self._create_tmp_playlist(paths)]
self._log.debug(u'executing command: {} {}', command_str,
b' '.join(open_args))
try:
util.interactive_open(open_args, command_str)
except OSError as exc:
raise ui.UserError(
"Could not play the query: {0}".format(exc))
return False
def _create_tmp_playlist(self, paths_list):
"""Create a temporary .m3u file. Return the filename.
"""
m3u = NamedTemporaryFile('w', suffix='.m3u', delete=False)
m3u = NamedTemporaryFile('wb', suffix='.m3u', delete=False)
for item in paths_list:
m3u.write(item + b'\n')
m3u.close()
return m3u.name
def before_choose_candidate_listener(self, session, task):
"""Append a "Play" choice to the interactive importer prompt.
"""
return [PromptChoice('y', 'plaY', self.importer_play)]
def importer_play(self, session, task):
"""Get items from current import task and send to play function.
"""
selection = task.items
paths = [item.path for item in selection]
open_args = self._playlist_or_paths(paths)
command_str = self._command_str()
if not self._exceeds_threshold(selection, command_str, open_args):
play(command_str, selection, paths, open_args, self._log,
keep_open=True)

View file

@ -12,9 +12,8 @@ Put something like the following in your config.yaml to configure:
from __future__ import division, absolute_import, print_function
import requests
from urlparse import urljoin
from urllib import urlencode
import xml.etree.ElementTree as ET
from six.moves.urllib.parse import urljoin, urlencode
from beets import config
from beets.plugins import BeetsPlugin
@ -68,6 +67,7 @@ class PlexUpdate(BeetsPlugin):
u'token': u'',
u'library_name': u'Music'})
config['plex']['token'].redact = True
self.register_listener('database_change', self.listen_for_db_change)
def listen_for_db_change(self, lib, model):

View file

@ -24,56 +24,124 @@ from operator import attrgetter
from itertools import groupby
def random_item(lib, opts, args):
query = decargs(args)
if opts.album:
objs = list(lib.albums(query))
def _length(obj, album):
"""Get the duration of an item or album.
"""
if album:
return sum(i.length for i in obj.items())
else:
objs = list(lib.items(query))
return obj.length
if opts.equal_chance:
def _equal_chance_permutation(objs, field='albumartist'):
"""Generate (lazily) a permutation of the objects where every group
with equal values for `field` have an equal chance of appearing in
any given position.
"""
# Group the objects by artist so we can sample from them.
key = attrgetter('albumartist')
key = attrgetter(field)
objs.sort(key=key)
objs_by_artists = {}
for artist, v in groupby(objs, key):
objs_by_artists[artist] = list(v)
objs = []
for _ in range(opts.number):
# Terminate early if we're out of objects to select.
if not objs_by_artists:
break
# While we still have artists with music to choose from, pick one
# randomly and pick a track from that artist.
while objs_by_artists:
# Choose an artist and an object for that artist, removing
# this choice from the pool.
artist = random.choice(objs_by_artists.keys())
artist = random.choice(list(objs_by_artists.keys()))
objs_from_artist = objs_by_artists[artist]
i = random.randint(0, len(objs_from_artist) - 1)
objs.append(objs_from_artist.pop(i))
yield objs_from_artist.pop(i)
# Remove the artist if we've used up all of its objects.
if not objs_from_artist:
del objs_by_artists[artist]
else:
number = min(len(objs), opts.number)
objs = random.sample(objs, number)
for item in objs:
print_(format(item))
def _take(iter, num):
"""Return a list containing the first `num` values in `iter` (or
fewer, if the iterable ends early).
"""
out = []
for val in iter:
out.append(val)
num -= 1
if num <= 0:
break
return out
def _take_time(iter, secs, album):
"""Return a list containing the first values in `iter`, which should
be Item or Album objects, that add up to the given amount of time in
seconds.
"""
out = []
total_time = 0.0
for obj in iter:
length = _length(obj, album)
if total_time + length <= secs:
out.append(obj)
total_time += length
return out
def random_objs(objs, album, number=1, time=None, equal_chance=False):
"""Get a random subset of the provided `objs`.
If `number` is provided, produce that many matches. Otherwise, if
`time` is provided, instead select a list whose total time is close
to that number of minutes. If `equal_chance` is true, give each
artist an equal chance of being included so that artists with more
songs are not represented disproportionately.
"""
# Permute the objects either in a straightforward way or an
# artist-balanced way.
if equal_chance:
perm = _equal_chance_permutation(objs)
else:
perm = objs
random.shuffle(perm) # N.B. This shuffles the original list.
# Select objects by time our count.
if time:
return _take_time(perm, time * 60, album)
else:
return _take(perm, number)
def random_func(lib, opts, args):
"""Select some random items or albums and print the results.
"""
# Fetch all the objects matching the query into a list.
query = decargs(args)
if opts.album:
objs = list(lib.albums(query))
else:
objs = list(lib.items(query))
# Print a random subset.
objs = random_objs(objs, opts.album, opts.number, opts.time,
opts.equal_chance)
for obj in objs:
print_(format(obj))
random_cmd = Subcommand('random',
help=u'chose a random track or album')
help=u'choose a random track or album')
random_cmd.parser.add_option(
u'-n', u'--number', action='store', type="int",
help=u'number of objects to choose', default=1)
random_cmd.parser.add_option(
u'-e', u'--equal-chance', action='store_true',
help=u'each artist has the same chance')
random_cmd.parser.add_option(
u'-t', u'--time', action='store', type="float",
help=u'total length in minutes of objects to choose')
random_cmd.parser.add_all_common_options()
random_cmd.func = random_item
random_cmd.func = random_func
class Random(BeetsPlugin):

View file

@ -18,15 +18,15 @@ from __future__ import division, absolute_import, print_function
import subprocess
import os
import collections
import itertools
import sys
import warnings
import re
import xml.parsers.expat
from six.moves import zip
from beets import logging
from beets import ui
from beets.plugins import BeetsPlugin
from beets.util import syspath, command_output, displayable_path
from beets.util import (syspath, command_output, bytestring_path,
displayable_path, py3_path)
# Utilities.
@ -60,7 +60,7 @@ def call(args):
except UnicodeEncodeError:
# Due to a bug in Python 2's subprocess on Windows, Unicode
# filenames can fail to encode on that platform. See:
# http://code.google.com/p/beets/issues/detail?id=499
# https://github.com/google-code-export/beets/issues/499
raise ReplayGainError(u"argument encoding failed")
@ -102,9 +102,9 @@ class Bs1770gainBackend(Backend):
'method': 'replaygain',
})
self.chunk_at = config['chunk_at'].as_number()
self.method = b'--' + bytes(config['method'].get(unicode))
self.method = '--' + config['method'].as_str()
cmd = b'bs1770gain'
cmd = 'bs1770gain'
try:
call([cmd, self.method])
self.command = cmd
@ -194,13 +194,14 @@ class Bs1770gainBackend(Backend):
"""
# Construct shell command.
cmd = [self.command]
cmd = cmd + [self.method]
cmd = cmd + [b'-it']
cmd += [self.method]
cmd += ['--xml', '-p']
# Workaround for Windows: the underlying tool fails on paths
# with the \\?\ prefix, so we don't use it here. This
# prevents the backend from working with long paths.
args = cmd + [syspath(i.path, prefix=False) for i in items]
path_list = [i.path for i in items]
# Invoke the command.
self._log.debug(
@ -209,40 +210,65 @@ class Bs1770gainBackend(Backend):
output = call(args)
self._log.debug(u'analysis finished: {0}', output)
results = self.parse_tool_output(output,
len(items) + is_album)
results = self.parse_tool_output(output, path_list, is_album)
self._log.debug(u'{0} items, {1} results', len(items), len(results))
return results
def parse_tool_output(self, text, num_lines):
def parse_tool_output(self, text, path_list, is_album):
"""Given the output from bs1770gain, parse the text and
return a list of dictionaries
containing information about each analyzed file.
"""
out = []
data = text.decode('utf8', errors='ignore')
regex = re.compile(
ur'(\s{2,2}\[\d+\/\d+\].*?|\[ALBUM\].*?)'
'(?=\s{2,2}\[\d+\/\d+\]|\s{2,2}\[ALBUM\]'
':|done\.\s)', re.DOTALL | re.UNICODE)
results = re.findall(regex, data)
for parts in results[0:num_lines]:
part = parts.split(b'\n')
if len(part) == 0:
self._log.debug(u'bad tool output: {0!r}', text)
raise ReplayGainError(u'bs1770gain failed')
per_file_gain = {}
album_gain = {} # mutable variable so it can be set from handlers
parser = xml.parsers.expat.ParserCreate(encoding='utf-8')
state = {'file': None, 'gain': None, 'peak': None}
def start_element_handler(name, attrs):
if name == u'track':
state['file'] = bytestring_path(attrs[u'file'])
if state['file'] in per_file_gain:
raise ReplayGainError(
u'duplicate filename in bs1770gain output')
elif name == u'integrated':
state['gain'] = float(attrs[u'lu'])
elif name == u'sample-peak':
state['peak'] = float(attrs[u'factor'])
def end_element_handler(name):
if name == u'track':
if state['gain'] is None or state['peak'] is None:
raise ReplayGainError(u'could not parse gain or peak from '
'the output of bs1770gain')
per_file_gain[state['file']] = Gain(state['gain'],
state['peak'])
state['gain'] = state['peak'] = None
elif name == u'summary':
if state['gain'] is None or state['peak'] is None:
raise ReplayGainError(u'could not parse gain or peak from '
'the output of bs1770gain')
album_gain["album"] = Gain(state['gain'], state['peak'])
state['gain'] = state['peak'] = None
parser.StartElementHandler = start_element_handler
parser.EndElementHandler = end_element_handler
parser.Parse(text, True)
if len(per_file_gain) != len(path_list):
raise ReplayGainError(
u'the number of results returned by bs1770gain does not match '
'the number of files passed to it')
# bs1770gain does not return the analysis results in the order that
# files are passed on the command line, because it is sorting the files
# internally. We must recover the order from the filenames themselves.
try:
song = {
'file': part[0],
'gain': float((part[1].split('/'))[1].split('LU')[0]),
'peak': float(part[2].split('/')[1]),
}
except IndexError:
self._log.info(u'bs1770gain reports (faulty file?): {}', parts)
continue
out.append(Gain(song['gain'], song['peak']))
out = [per_file_gain[os.path.basename(p)] for p in path_list]
except KeyError:
raise ReplayGainError(
u'unrecognized filename in bs1770gain output '
'(bs1770gain can only deal with utf-8 file names)')
if is_album:
out.append(album_gain["album"])
return out
@ -256,7 +282,7 @@ class CommandBackend(Backend):
'noclip': True,
})
self.command = config["command"].get(unicode)
self.command = config["command"].as_str()
if self.command:
# Explicit executable path.
@ -267,9 +293,9 @@ class CommandBackend(Backend):
)
else:
# Check whether the program is in $PATH.
for cmd in (b'mp3gain', b'aacgain'):
for cmd in ('mp3gain', 'aacgain'):
try:
call([cmd, b'-v'])
call([cmd, '-v'])
self.command = cmd
except OSError:
pass
@ -286,7 +312,7 @@ class CommandBackend(Backend):
"""Computes the track gain of the given tracks, returns a list
of TrackGain objects.
"""
supported_items = filter(self.format_supported, items)
supported_items = list(filter(self.format_supported, items))
output = self.compute_gain(supported_items, False)
return output
@ -297,7 +323,7 @@ class CommandBackend(Backend):
# TODO: What should be done when not all tracks in the album are
# supported?
supported_items = filter(self.format_supported, album.items())
supported_items = list(filter(self.format_supported, album.items()))
if len(supported_items) != len(album.items()):
self._log.debug(u'tracks are of unsupported format')
return AlbumGain(None, [])
@ -334,14 +360,14 @@ class CommandBackend(Backend):
# tag-writing; this turns the mp3gain/aacgain tool into a gain
# calculator rather than a tag manipulator because we take care
# of changing tags ourselves.
cmd = [self.command, b'-o', b'-s', b's']
cmd = [self.command, '-o', '-s', 's']
if self.noclip:
# Adjust to avoid clipping.
cmd = cmd + [b'-k']
cmd = cmd + ['-k']
else:
# Disable clipping warning.
cmd = cmd + [b'-c']
cmd = cmd + [b'-d', bytes(self.gain_offset)]
cmd = cmd + ['-c']
cmd = cmd + ['-d', str(self.gain_offset)]
cmd = cmd + [syspath(i.path) for i in items]
self._log.debug(u'analyzing {0} files', len(items))
@ -574,7 +600,7 @@ class GStreamerBackend(Backend):
self._file = self._files.pop(0)
self._pipe.set_state(self.Gst.State.NULL)
self._src.set_property("location", syspath(self._file.path))
self._src.set_property("location", py3_path(syspath(self._file.path)))
self._pipe.set_state(self.Gst.State.PLAYING)
return True
@ -587,16 +613,6 @@ class GStreamerBackend(Backend):
self._file = self._files.pop(0)
# Disconnect the decodebin element from the pipeline, set its
# state to READY to to clear it.
self._decbin.unlink(self._conv)
self._decbin.set_state(self.Gst.State.READY)
# Set a new file on the filesrc element, can only be done in the
# READY state
self._src.set_state(self.Gst.State.READY)
self._src.set_property("location", syspath(self._file.path))
# Ensure the filesrc element received the paused state of the
# pipeline in a blocking manner
self._src.sync_state_with_parent()
@ -607,6 +623,19 @@ class GStreamerBackend(Backend):
self._decbin.sync_state_with_parent()
self._decbin.get_state(self.Gst.CLOCK_TIME_NONE)
# Disconnect the decodebin element from the pipeline, set its
# state to READY to to clear it.
self._decbin.unlink(self._conv)
self._decbin.set_state(self.Gst.State.READY)
# Set a new file on the filesrc element, can only be done in the
# READY state
self._src.set_state(self.Gst.State.READY)
self._src.set_property("location", py3_path(syspath(self._file.path)))
self._decbin.link(self._conv)
self._pipe.set_state(self.Gst.State.READY)
return True
def _set_next_file(self):
@ -794,7 +823,7 @@ class ReplayGainPlugin(BeetsPlugin):
"command": CommandBackend,
"gstreamer": GStreamerBackend,
"audiotools": AudioToolsBackend,
"bs1770gain": Bs1770gainBackend
"bs1770gain": Bs1770gainBackend,
}
def __init__(self):
@ -806,10 +835,11 @@ class ReplayGainPlugin(BeetsPlugin):
'auto': True,
'backend': u'command',
'targetlevel': 89,
'r128': ['Opus'],
})
self.overwrite = self.config['overwrite'].get(bool)
backend_name = self.config['backend'].get(unicode)
backend_name = self.config['backend'].as_str()
if backend_name not in self.backends:
raise ui.UserError(
u"Selected ReplayGain backend {0} is not supported. "
@ -823,6 +853,9 @@ class ReplayGainPlugin(BeetsPlugin):
if self.config['auto']:
self.import_stages = [self.imported]
# Formats to use R128.
self.r128_whitelist = self.config['r128'].as_str_seq()
try:
self.backend_instance = self.backends[backend_name](
self.config, self._log
@ -831,9 +864,19 @@ class ReplayGainPlugin(BeetsPlugin):
raise ui.UserError(
u'replaygain initialization failed: {0}'.format(e))
self.r128_backend_instance = ''
def should_use_r128(self, item):
"""Checks the plugin setting to decide whether the calculation
should be done using the EBU R128 standard and use R128_ tags instead.
"""
return item.format in self.r128_whitelist
def track_requires_gain(self, item):
return self.overwrite or \
(not item.rg_track_gain or not item.rg_track_peak)
(self.should_use_r128(item) and not item.r128_track_gain) or \
(not self.should_use_r128(item) and
(not item.rg_track_gain or not item.rg_track_peak))
def album_requires_gain(self, album):
# Skip calculating gain only when *all* files don't need
@ -841,7 +884,11 @@ class ReplayGainPlugin(BeetsPlugin):
# needs recalculation, we still get an accurate album gain
# value.
return self.overwrite or \
any([not item.rg_album_gain or not item.rg_album_peak
any([self.should_use_r128(item) and
(not item.r128_track_gain or not item.r128_album_gain)
for item in album.items()]) or \
any([not self.should_use_r128(item) and
(not item.rg_album_gain or not item.rg_album_peak)
for item in album.items()])
def store_track_gain(self, item, track_gain):
@ -852,6 +899,12 @@ class ReplayGainPlugin(BeetsPlugin):
self._log.debug(u'applied track gain {0}, peak {1}',
item.rg_track_gain, item.rg_track_peak)
def store_track_r128_gain(self, item, track_gain):
item.r128_track_gain = int(round(track_gain.gain * pow(2, 8)))
item.store()
self._log.debug(u'applied track gain {0}', item.r128_track_gain)
def store_album_gain(self, album, album_gain):
album.rg_album_gain = album_gain.gain
album.rg_album_peak = album_gain.peak
@ -860,7 +913,13 @@ class ReplayGainPlugin(BeetsPlugin):
self._log.debug(u'applied album gain {0}, peak {1}',
album.rg_album_gain, album.rg_album_peak)
def handle_album(self, album, write):
def store_album_r128_gain(self, album, album_gain):
album.r128_album_gain = int(round(album_gain.gain * pow(2, 8)))
album.store()
self._log.debug(u'applied album gain {0}', album.r128_album_gain)
def handle_album(self, album, write, force=False):
"""Compute album and track replay gain store it in all of the
album's items.
@ -868,24 +927,41 @@ class ReplayGainPlugin(BeetsPlugin):
item. If replay gain information is already present in all
items, nothing is done.
"""
if not self.album_requires_gain(album):
if not force and not self.album_requires_gain(album):
self._log.info(u'Skipping album {0}', album)
return
self._log.info(u'analyzing {0}', album)
if (any([self.should_use_r128(item) for item in album.items()]) and not
all(([self.should_use_r128(item) for item in album.items()]))):
raise ReplayGainError(
u"Mix of ReplayGain and EBU R128 detected"
u" for some tracks in album {0}".format(album)
)
if any([self.should_use_r128(item) for item in album.items()]):
if self.r128_backend_instance == '':
self.init_r128_backend()
backend_instance = self.r128_backend_instance
store_track_gain = self.store_track_r128_gain
store_album_gain = self.store_album_r128_gain
else:
backend_instance = self.backend_instance
store_track_gain = self.store_track_gain
store_album_gain = self.store_album_gain
try:
album_gain = self.backend_instance.compute_album_gain(album)
album_gain = backend_instance.compute_album_gain(album)
if len(album_gain.track_gains) != len(album.items()):
raise ReplayGainError(
u"ReplayGain backend failed "
u"for some tracks in album {0}".format(album)
)
self.store_album_gain(album, album_gain.album_gain)
for item, track_gain in itertools.izip(album.items(),
album_gain.track_gains):
self.store_track_gain(item, track_gain)
store_album_gain(album, album_gain.album_gain)
for item, track_gain in zip(album.items(), album_gain.track_gains):
store_track_gain(item, track_gain)
if write:
item.try_write()
except ReplayGainError as e:
@ -894,27 +970,36 @@ class ReplayGainPlugin(BeetsPlugin):
raise ui.UserError(
u"Fatal replay gain error: {0}".format(e))
def handle_track(self, item, write):
def handle_track(self, item, write, force=False):
"""Compute track replay gain and store it in the item.
If ``write`` is truthy then ``item.write()`` is called to write
the data to disk. If replay gain information is already present
in the item, nothing is done.
"""
if not self.track_requires_gain(item):
if not force and not self.track_requires_gain(item):
self._log.info(u'Skipping track {0}', item)
return
self._log.info(u'analyzing {0}', item)
if self.should_use_r128(item):
if self.r128_backend_instance == '':
self.init_r128_backend()
backend_instance = self.r128_backend_instance
store_track_gain = self.store_track_r128_gain
else:
backend_instance = self.backend_instance
store_track_gain = self.store_track_gain
try:
track_gains = self.backend_instance.compute_track_gain([item])
track_gains = backend_instance.compute_track_gain([item])
if len(track_gains) != 1:
raise ReplayGainError(
u"ReplayGain backend failed for track {0}".format(item)
)
self.store_track_gain(item, track_gains[0])
store_track_gain(item, track_gains[0])
if write:
item.try_write()
except ReplayGainError as e:
@ -923,6 +1008,19 @@ class ReplayGainPlugin(BeetsPlugin):
raise ui.UserError(
u"Fatal replay gain error: {0}".format(e))
def init_r128_backend(self):
backend_name = 'bs1770gain'
try:
self.r128_backend_instance = self.backends[backend_name](
self.config, self._log
)
except (ReplayGainError, FatalReplayGainError) as e:
raise ui.UserError(
u'replaygain initialization failed: {0}'.format(e))
self.r128_backend_instance.method = '--ebu'
def imported(self, session, task):
"""Add replay gain info to items or albums of ``task``.
"""
@ -935,19 +1033,28 @@ class ReplayGainPlugin(BeetsPlugin):
"""Return the "replaygain" ui subcommand.
"""
def func(lib, opts, args):
self._log.setLevel(logging.INFO)
write = ui.should_write()
write = ui.should_write(opts.write)
force = opts.force
if opts.album:
for album in lib.albums(ui.decargs(args)):
self.handle_album(album, write)
self.handle_album(album, write, force)
else:
for item in lib.items(ui.decargs(args)):
self.handle_track(item, write)
self.handle_track(item, write, force)
cmd = ui.Subcommand('replaygain', help=u'analyze for ReplayGain')
cmd.parser.add_album_option()
cmd.parser.add_option(
"-f", "--force", dest="force", action="store_true", default=False,
help=u"analyze all files, including those that "
"already have ReplayGain metadata")
cmd.parser.add_option(
"-w", "--write", default=None, action="store_true",
help=u"write new metadata to files' tags")
cmd.parser.add_option(
"-W", "--nowrite", dest="write", action="store_false",
help=u"don't write metadata (opposite of -w)")
cmd.func = func
return [cmd]

View file

@ -51,7 +51,7 @@ class RewritePlugin(BeetsPlugin):
# Gather all the rewrite rules for each field.
rules = defaultdict(list)
for key, view in self.config.items():
value = view.get(unicode)
value = view.as_str()
try:
fieldname, pattern = key.split(None, 1)
except ValueError:
@ -68,7 +68,7 @@ class RewritePlugin(BeetsPlugin):
rules['albumartist'].append((pattern, value))
# Replace each template field with the new rewriter function.
for fieldname, fieldrules in rules.iteritems():
for fieldname, fieldrules in rules.items():
getter = rewriter(fieldname, fieldrules)
self.template_fields[fieldname] = getter
if fieldname in library.Album._fields:

View file

@ -24,6 +24,7 @@ from beets import ui
from beets import util
from beets import config
from beets import mediafile
import mutagen
_MUTAGEN_FORMATS = {
'asf': 'ASF',
@ -106,7 +107,7 @@ class ScrubPlugin(BeetsPlugin):
for tag in f.keys():
del f[tag]
f.save()
except IOError as exc:
except (IOError, mutagen.MutagenError) as exc:
self._log.error(u'could not scrub {0}: {1}',
util.displayable_path(path), exc)
@ -119,10 +120,11 @@ class ScrubPlugin(BeetsPlugin):
try:
mf = mediafile.MediaFile(util.syspath(item.path),
config['id3v23'].get(bool))
except IOError as exc:
except mediafile.UnreadableFileError as exc:
self._log.error(u'could not open file to scrub: {0}',
exc)
art = mf.art
return
images = mf.images
# Remove all tags.
self._scrub(item.path)
@ -131,12 +133,15 @@ class ScrubPlugin(BeetsPlugin):
if restore:
self._log.debug(u'writing new tags after scrub')
item.try_write()
if art:
if images:
self._log.debug(u'restoring art')
try:
mf = mediafile.MediaFile(util.syspath(item.path),
config['id3v23'].get(bool))
mf.art = art
mf.images = images
mf.save()
except mediafile.UnreadableFileError as exc:
self._log.error(u'could not write tags: {0}', exc)
def import_task_files(self, session, task):
"""Automatically scrub imported files."""

View file

@ -20,11 +20,13 @@ from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
from beets import ui
from beets.util import mkdirall, normpath, syspath
from beets.util import (mkdirall, normpath, sanitize_path, syspath,
bytestring_path)
from beets.library import Item, Album, parse_query_string
from beets.dbcore import OrQuery
from beets.dbcore.query import MultipleSort, ParsingError
import os
import six
class SmartPlaylistPlugin(BeetsPlugin):
@ -97,7 +99,7 @@ class SmartPlaylistPlugin(BeetsPlugin):
for playlist in self.config['playlists'].get(list):
if 'name' not in playlist:
self._log.warn(u"playlist configuration is missing name")
self._log.warning(u"playlist configuration is missing name")
continue
playlist_data = (playlist['name'],)
@ -106,7 +108,7 @@ class SmartPlaylistPlugin(BeetsPlugin):
qs = playlist.get(key)
if qs is None:
query_and_sort = None, None
elif isinstance(qs, basestring):
elif isinstance(qs, six.string_types):
query_and_sort = parse_query_string(qs, Model)
elif len(qs) == 1:
query_and_sort = parse_query_string(qs[0], Model)
@ -133,7 +135,7 @@ class SmartPlaylistPlugin(BeetsPlugin):
playlist_data += (query_and_sort,)
except ParsingError as exc:
self._log.warn(u"invalid query in playlist {}: {}",
self._log.warning(u"invalid query in playlist {}: {}",
playlist['name'], exc)
continue
@ -165,10 +167,14 @@ class SmartPlaylistPlugin(BeetsPlugin):
len(self._matched_playlists))
playlist_dir = self.config['playlist_dir'].as_filename()
playlist_dir = bytestring_path(playlist_dir)
relative_to = self.config['relative_to'].get()
if relative_to:
relative_to = normpath(relative_to)
# Maps playlist filenames to lists of track filenames.
m3us = {}
for playlist in self._matched_playlists:
name, (query, q_sort), (album_query, a_q_sort) = playlist
self._log.debug(u"Creating playlist {0}", name)
@ -180,11 +186,11 @@ class SmartPlaylistPlugin(BeetsPlugin):
for album in lib.albums(album_query, a_q_sort):
items.extend(album.items())
m3us = {}
# As we allow tags in the m3u names, we'll need to iterate through
# the items and generate the correct m3u file names.
for item in items:
m3u_name = item.evaluate_template(name, True)
m3u_name = sanitize_path(m3u_name, lib.replacements)
if m3u_name not in m3us:
m3us[m3u_name] = []
item_path = item.path
@ -192,11 +198,14 @@ class SmartPlaylistPlugin(BeetsPlugin):
item_path = os.path.relpath(item.path, relative_to)
if item_path not in m3us[m3u_name]:
m3us[m3u_name].append(item_path)
# Now iterate through the m3us that we need to generate
# Write all of the accumulated track lists to files.
for m3u in m3us:
m3u_path = normpath(os.path.join(playlist_dir, m3u))
m3u_path = normpath(os.path.join(playlist_dir,
bytestring_path(m3u)))
mkdirall(m3u_path)
with open(syspath(m3u_path), 'w') as f:
with open(syspath(m3u_path), 'wb') as f:
for path in m3us[m3u]:
f.write(path + b'\n')
self._log.info(u"{0} playlists updated", len(self._matched_playlists))

View file

@ -0,0 +1,48 @@
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2018, Tobias Sauerwein.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Updates a Sonos library whenever the beets library is changed.
This is based on the Kodi Update plugin.
"""
from __future__ import division, absolute_import, print_function
from beets.plugins import BeetsPlugin
import soco
class SonosUpdate(BeetsPlugin):
    """Beets plugin that asks a Sonos controller to rescan its music
    library whenever the beets database changes.
    """

    def __init__(self):
        super(SonosUpdate, self).__init__()
        # Defer the actual refresh until the CLI exits: only register the
        # 'cli_exit' hook once we know the database actually changed.
        self.register_listener('database_change', self.listen_for_db_change)

    def listen_for_db_change(self, lib, model):
        """Listens for beets db change and registers the update."""
        self.register_listener('cli_exit', self.update)

    def update(self, lib):
        """When the client exits, try to send a refresh request to a
        Sonos controller.
        """
        self._log.info(u'Requesting a Sonos library update...')

        # Any one Sonos device on the network can trigger the
        # household-wide library update.
        device = soco.discovery.any_soco()

        if device:
            device.music_library.start_library_update()
        else:
            self._log.warning(u'Could not find a Sonos device.')
            return

        self._log.info(u'Sonos update triggered')

View file

@ -63,7 +63,7 @@ class SpotifyPlugin(BeetsPlugin):
self.config['show_failures'].set(True)
if self.config['mode'].get() not in ['list', 'open']:
self._log.warn(u'{0} is not a valid mode',
self._log.warning(u'{0} is not a valid mode',
self.config['mode'].get())
return False
@ -124,9 +124,8 @@ class SpotifyPlugin(BeetsPlugin):
# Apply market filter if requested
region_filter = self.config['region_filter'].get()
if region_filter:
r_data = filter(
lambda x: region_filter in x['available_markets'], r_data
)
r_data = [x for x in r_data if region_filter
in x['available_markets']]
# Simplest, take the first result
chosen_result = None
@ -155,7 +154,7 @@ class SpotifyPlugin(BeetsPlugin):
self._log.info(u'track: {0}', track)
self._log.info(u'')
else:
self._log.warn(u'{0} track(s) did not match a Spotify ID;\n'
self._log.warning(u'{0} track(s) did not match a Spotify ID;\n'
u'use --show-failures to display',
failure_count)
@ -163,7 +162,7 @@ class SpotifyPlugin(BeetsPlugin):
def output_results(self, results):
if results:
ids = map(lambda x: x['id'], results)
ids = [x['id'] for x in results]
if self.config['mode'].get() == "open":
self._log.info(u'Attempting to open Spotify with playlist')
spotify_url = self.playlist_partial + ",".join(ids)
@ -171,6 +170,6 @@ class SpotifyPlugin(BeetsPlugin):
else:
for item in ids:
print(unicode.encode(self.open_url + item))
print(self.open_url + item)
else:
self._log.warn(u'No Spotify tracks found from beets query')
self._log.warning(u'No Spotify tracks found from beets query')

View file

@ -54,14 +54,14 @@ class ThePlugin(BeetsPlugin):
self._log.error(u'invalid pattern: {0}', p)
else:
if not (p.startswith('^') or p.endswith('$')):
self._log.warn(u'warning: \"{0}\" will not '
self._log.warning(u'warning: \"{0}\" will not '
u'match string start/end', p)
if self.config['a']:
self.patterns = [PATTERN_A] + self.patterns
if self.config['the']:
self.patterns = [PATTERN_THE] + self.patterns
if not self.patterns:
self._log.warn(u'no patterns defined!')
self._log.warning(u'no patterns defined!')
def unthe(self, text, pattern):
"""Moves pattern in the path format string or strips it
@ -81,7 +81,7 @@ class ThePlugin(BeetsPlugin):
if self.config['strip']:
return r
else:
fmt = self.config['format'].get(unicode)
fmt = self.config['format'].as_str()
return fmt.format(r, t.strip()).strip()
else:
return u''

View file

@ -35,6 +35,7 @@ from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, decargs
from beets import util
from beets.util.artresizer import ArtResizer, get_im_version, get_pil_version
import six
BASE_DIR = os.path.join(BaseDirectory.xdg_cache_home, "thumbnails")
@ -162,15 +163,16 @@ class ThumbnailsPlugin(BeetsPlugin):
See http://standards.freedesktop.org/thumbnail-spec/latest/x227.html
"""
uri = self.get_uri(path)
hash = md5(uri).hexdigest()
return b"{0}.png".format(hash)
hash = md5(uri.encode('utf-8')).hexdigest()
return util.bytestring_path("{0}.png".format(hash))
def add_tags(self, album, image_path):
"""Write required metadata to the thumbnail
See http://standards.freedesktop.org/thumbnail-spec/latest/x142.html
"""
mtime = os.stat(album.artpath).st_mtime
metadata = {"Thumb::URI": self.get_uri(album.artpath),
"Thumb::MTime": unicode(os.stat(album.artpath).st_mtime)}
"Thumb::MTime": six.text_type(mtime)}
try:
self.write_metadata(image_path, metadata)
except Exception:
@ -183,7 +185,8 @@ class ThumbnailsPlugin(BeetsPlugin):
return
artfile = os.path.split(album.artpath)[1]
with open(outfilename, 'w') as f:
f.write(b"[Desktop Entry]\nIcon=./{0}".format(artfile))
f.write('[Desktop Entry]\n')
f.write('Icon=./{0}'.format(artfile.decode('utf-8')))
f.close()
self._log.debug(u"Wrote file {0}", util.displayable_path(outfilename))
@ -232,7 +235,7 @@ def copy_c_string(c_string):
# work. A more surefire way would be to allocate a ctypes buffer and copy
# the data with `memcpy` or somesuch.
s = ctypes.cast(c_string, ctypes.c_char_p).value
return '' + s
return b'' + s
class GioURI(URIGetter):
@ -271,8 +274,6 @@ class GioURI(URIGetter):
try:
uri_ptr = self.libgio.g_file_get_uri(g_file_ptr)
except:
raise
finally:
self.libgio.g_object_unref(g_file_ptr)
if not uri_ptr:
@ -282,8 +283,12 @@ class GioURI(URIGetter):
try:
uri = copy_c_string(uri_ptr)
except:
raise
finally:
self.libgio.g_free(uri_ptr)
return uri
try:
return uri.decode(util._fsencoding())
except UnicodeDecodeError:
raise RuntimeError(
"Could not decode filename from GIO: {!r}".format(uri)
)

View file

@ -24,7 +24,9 @@ import flask
from flask import g
from werkzeug.routing import BaseConverter, PathConverter
import os
from unidecode import unidecode
import json
import base64
# Utilities.
@ -37,8 +39,16 @@ def _rep(obj, expand=False):
out = dict(obj)
if isinstance(obj, beets.library.Item):
if app.config.get('INCLUDE_PATHS', False):
out['path'] = util.displayable_path(out['path'])
else:
del out['path']
# Filter all bytes attributes and convert them to strings.
for key, value in out.items():
if isinstance(out[key], bytes):
out[key] = base64.b64encode(value).decode('ascii')
# Get the size (in bytes) of the backing file. This is useful
# for the Tomahawk resolver API.
try:
@ -55,11 +65,13 @@ def _rep(obj, expand=False):
return out
def json_generator(items, root):
def json_generator(items, root, expand=False):
"""Generator that dumps list of beets Items or Albums as JSON
:param root: root key for JSON
:param items: list of :class:`Item` or :class:`Album` to dump
:param expand: If true every :class:`Album` contains its items in the json
representation
:returns: generator that yields strings
"""
yield '{"%s":[' % root
@ -69,10 +81,16 @@ def json_generator(items, root):
first = False
else:
yield ','
yield json.dumps(_rep(item))
yield json.dumps(_rep(item, expand=expand))
yield ']}'
def is_expand():
    """Return True when the client asked for an expanded response,
    i.e. the ``expand`` query parameter is present in the request.
    """
    return 'expand' in flask.request.args
def resource(name):
"""Decorates a function to handle RESTful HTTP requests for a resource.
"""
@ -82,7 +100,7 @@ def resource(name):
entities = [entity for entity in entities if entity]
if len(entities) == 1:
return flask.jsonify(_rep(entities[0]))
return flask.jsonify(_rep(entities[0], expand=is_expand()))
elif entities:
return app.response_class(
json_generator(entities, root=name),
@ -101,7 +119,10 @@ def resource_query(name):
def make_responder(query_func):
def responder(queries):
return app.response_class(
json_generator(query_func(queries), root='results'),
json_generator(
query_func(queries),
root='results', expand=is_expand()
),
mimetype='application/json'
)
responder.__name__ = 'query_{0}'.format(name)
@ -116,7 +137,7 @@ def resource_list(name):
def make_responder(list_all):
def responder():
return app.response_class(
json_generator(list_all(), root=name),
json_generator(list_all(), root=name, expand=is_expand()),
mimetype='application/json'
)
responder.__name__ = 'all_{0}'.format(name)
@ -162,11 +183,16 @@ class QueryConverter(PathConverter):
return ','.join(value)
class EverythingConverter(PathConverter):
    """Werkzeug URL converter that matches the entire remainder of the
    URL, including slashes (used for the /item/path/... endpoint).
    """
    # Non-greedy wildcard so any trailing rule components can still match.
    regex = '.*?'
# Flask setup.
app = flask.Flask(__name__)
app.url_map.converters['idlist'] = IdListConverter
app.url_map.converters['query'] = QueryConverter
app.url_map.converters['everything'] = EverythingConverter
@app.before_request
@ -192,9 +218,34 @@ def all_items():
@app.route('/item/<int:item_id>/file')
def item_file(item_id):
item = g.lib.get_item(item_id)
response = flask.send_file(item.path, as_attachment=True,
attachment_filename=os.path.basename(item.path))
response.headers['Content-Length'] = os.path.getsize(item.path)
# On Windows under Python 2, Flask wants a Unicode path. On Python 3, it
# *always* wants a Unicode path.
if os.name == 'nt':
item_path = util.syspath(item.path)
else:
item_path = util.py3_path(item.path)
try:
unicode_item_path = util.text_string(item.path)
except (UnicodeDecodeError, UnicodeEncodeError):
unicode_item_path = util.displayable_path(item.path)
base_filename = os.path.basename(unicode_item_path)
try:
# Imitate http.server behaviour
base_filename.encode("latin-1", "strict")
except UnicodeEncodeError:
safe_filename = unidecode(base_filename)
else:
safe_filename = base_filename
response = flask.send_file(
item_path,
as_attachment=True,
attachment_filename=safe_filename
)
response.headers['Content-Length'] = os.path.getsize(item_path)
return response
@ -204,6 +255,16 @@ def item_query(queries):
return g.lib.items(queries)
@app.route('/item/path/<everything:path>')
def item_at_path(path):
    """Look up a single library item by its filesystem path.

    Returns the item's JSON representation, or a 404 response when no
    item with that path exists in the library.
    """
    # Library paths are stored as bytes; the URL arrives as text.
    # NOTE(review): assumes on-disk paths are UTF-8 encodable — confirm.
    query = beets.library.PathQuery('path', path.encode('utf-8'))
    item = g.lib.items(query).get()
    if item:
        return flask.jsonify(_rep(item))
    else:
        return flask.abort(404)
@app.route('/item/values/<string:key>')
def item_unique_field_values(key):
sort_key = flask.request.args.get('sort_key', key)
@ -239,8 +300,8 @@ def album_query(queries):
@app.route('/album/<int:album_id>/art')
def album_art(album_id):
album = g.lib.get_album(album_id)
if album.artpath:
return flask.send_file(album.artpath)
if album and album.artpath:
return flask.send_file(album.artpath.decode())
else:
return flask.abort(404)
@ -295,6 +356,9 @@ class WebPlugin(BeetsPlugin):
'host': u'127.0.0.1',
'port': 8337,
'cors': '',
'cors_supports_credentials': False,
'reverse_proxy': False,
'include_paths': False,
})
def commands(self):
@ -310,6 +374,11 @@ class WebPlugin(BeetsPlugin):
self.config['port'] = int(args.pop(0))
app.config['lib'] = lib
# Normalizes json output
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False
app.config['INCLUDE_PATHS'] = self.config['include_paths']
# Enable CORS if required.
if self.config['cors']:
self._log.info(u'Enabling CORS with origin: {0}',
@ -319,10 +388,56 @@ class WebPlugin(BeetsPlugin):
app.config['CORS_RESOURCES'] = {
r"/*": {"origins": self.config['cors'].get(str)}
}
CORS(app)
CORS(
app,
supports_credentials=self.config[
'cors_supports_credentials'
].get(bool)
)
# Allow serving behind a reverse proxy
if self.config['reverse_proxy']:
app.wsgi_app = ReverseProxied(app.wsgi_app)
# Start the web application.
app.run(host=self.config['host'].get(unicode),
app.run(host=self.config['host'].as_str(),
port=self.config['port'].get(int),
debug=opts.debug, threaded=True)
cmd.func = func
return [cmd]
class ReverseProxied(object):
    '''WSGI middleware for running the app behind a reverse proxy.

    Wrap the application in this middleware and configure the
    front-end server to add these headers, to let you quietly bind
    this to a URL other than / and to an HTTP scheme that is
    different than what is used locally.

    In nginx:

    location /myprefix {
        proxy_pass http://192.168.0.1:5001;
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Scheme $scheme;
        proxy_set_header X-Script-Name /myprefix;
    }

    From: http://flask.pocoo.org/snippets/35/

    :param app: the WSGI application
    '''

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        # Mount prefix supplied by the proxy, if any.
        prefix = environ.get('HTTP_X_SCRIPT_NAME', '')
        if prefix:
            environ['SCRIPT_NAME'] = prefix
            path = environ['PATH_INFO']
            if path.startswith(prefix):
                # Strip the prefix so routing sees the app-relative path.
                environ['PATH_INFO'] = path[len(prefix):]

        # Original scheme (http/https) as seen by the proxy.
        scheme = environ.get('HTTP_X_SCHEME', '')
        if scheme:
            environ['wsgi.url_scheme'] = scheme
        return self.app(environ, start_response)

View file

@ -4,7 +4,7 @@ var timeFormat = function(secs) {
return '0:00';
}
secs = Math.round(secs);
var mins = '' + Math.round(secs / 60);
var mins = '' + Math.floor(secs / 60);
secs = '' + (secs % 60);
if (secs.length < 2) {
secs = '0' + secs;
@ -147,7 +147,7 @@ var BeetsRouter = Backbone.Router.extend({
},
itemQuery: function(query) {
var queryURL = query.split(/\s+/).map(encodeURIComponent).join('/');
$.getJSON('/item/query/' + queryURL, function(data) {
$.getJSON('item/query/' + queryURL, function(data) {
var models = _.map(
data['results'],
function(d) { return new Item(d); }
@ -161,7 +161,7 @@ var router = new BeetsRouter();
// Model.
var Item = Backbone.Model.extend({
urlRoot: '/item'
urlRoot: 'item'
});
var Items = Backbone.Collection.extend({
model: Item
@ -264,7 +264,7 @@ var AppView = Backbone.View.extend({
$('#extra-detail').empty().append(extraDetailView.render().el);
},
playItem: function(item) {
var url = '/item/' + item.get('id') + '/file';
var url = 'item/' + item.get('id') + '/file';
$('#player audio').attr('src', url);
$('#player audio').get(0).play();

View file

@ -82,7 +82,7 @@
<% } %>
<dt>File</dt>
<dd>
<a target="_blank" class="download" href="/item/<%= id %>/file">download</a>
<a target="_blank" class="download" href="item/<%= id %>/file">download</a>
</dd>
<% if (lyrics) { %>
<dt>Lyrics</dt>

View file

@ -16,125 +16,148 @@
""" Clears tag fields in media files."""
from __future__ import division, absolute_import, print_function
import six
import re
from beets.plugins import BeetsPlugin
from beets.mediafile import MediaFile
from beets.importer import action
from beets.ui import Subcommand, decargs, input_yn
from beets.util import confit
__author__ = 'baobab@heresiarch.info'
__version__ = '0.10'
class ZeroPlugin(BeetsPlugin):
_instance = None
def __init__(self):
super(ZeroPlugin, self).__init__()
# Listeners.
self.register_listener('write', self.write_event)
self.register_listener('import_task_choice',
self.import_task_choice_event)
self.config.add({
'auto': True,
'fields': [],
'keep_fields': [],
'update_database': False,
})
self.patterns = {}
self.fields_to_progs = {}
self.warned = False
# We'll only handle `fields` or `keep_fields`, but not both.
"""Read the bulk of the config into `self.fields_to_progs`.
After construction, `fields_to_progs` contains all the fields that
should be zeroed as keys and maps each of those to a list of compiled
regexes (progs) as values.
A field is zeroed if its value matches one of the associated progs. If
progs is empty, then the associated field is always zeroed.
"""
if self.config['fields'] and self.config['keep_fields']:
self._log.warn(u'cannot blacklist and whitelist at the same time')
self._log.warning(
u'cannot blacklist and whitelist at the same time'
)
# Blacklist mode.
if self.config['fields']:
self.validate_config('fields')
elif self.config['fields']:
for field in self.config['fields'].as_str_seq():
self.set_pattern(field)
self._set_pattern(field)
# Whitelist mode.
elif self.config['keep_fields']:
self.validate_config('keep_fields')
for field in MediaFile.fields():
if field in self.config['keep_fields'].as_str_seq():
continue
self.set_pattern(field)
if (field not in self.config['keep_fields'].as_str_seq() and
# These fields should always be preserved.
for key in ('id', 'path', 'album_id'):
if key in self.patterns:
del self.patterns[key]
field not in ('id', 'path', 'album_id')):
self._set_pattern(field)
def validate_config(self, mode):
"""Check whether fields in the configuration are valid.
def commands(self):
zero_command = Subcommand('zero', help='set fields to null')
`mode` should either be "fields" or "keep_fields", indicating
the section of the configuration to validate.
def zero_fields(lib, opts, args):
if not decargs(args) and not input_yn(
u"Remove fields for all items? (Y/n)",
True):
return
for item in lib.items(decargs(args)):
self.process_item(item)
zero_command.func = zero_fields
return [zero_command]
def _set_pattern(self, field):
"""Populate `self.fields_to_progs` for a given field.
Do some sanity checks then compile the regexes.
"""
for field in self.config[mode].as_str_seq():
if field not in MediaFile.fields():
self._log.error(u'invalid field: {0}', field)
continue
if mode == 'fields' and field in ('id', 'path', 'album_id'):
self._log.warn(u'field \'{0}\' ignored, zeroing '
elif field in ('id', 'path', 'album_id'):
self._log.warning(u'field \'{0}\' ignored, zeroing '
u'it would be dangerous', field)
continue
def set_pattern(self, field):
"""Set a field in `self.patterns` to a string list corresponding to
the configuration, or `True` if the field has no specific
configuration.
"""
else:
try:
self.patterns[field] = self.config[field].as_str_seq()
for pattern in self.config[field].as_str_seq():
prog = re.compile(pattern, re.IGNORECASE)
self.fields_to_progs.setdefault(field, []).append(prog)
except confit.NotFoundError:
# Matches everything
self.patterns[field] = True
self.fields_to_progs[field] = []
def import_task_choice_event(self, session, task):
"""Listen for import_task_choice event."""
if task.choice_flag == action.ASIS and not self.warned:
self._log.warn(u'cannot zero in \"as-is\" mode')
self._log.warning(u'cannot zero in \"as-is\" mode')
self.warned = True
# TODO request write in as-is mode
@classmethod
def match_patterns(cls, field, patterns):
"""Check if field (as string) is matching any of the patterns in
the list.
def write_event(self, item, path, tags):
if self.config['auto']:
self.set_fields(item, tags)
def set_fields(self, item, tags):
"""Set values in `tags` to `None` if the field is in
`self.fields_to_progs` and any of the corresponding `progs` matches the
field value.
Also update the `item` itself if `update_database` is set in the
config.
"""
if patterns is True:
return True
for p in patterns:
if re.search(p, unicode(field), flags=re.IGNORECASE):
return True
fields_set = False
if not self.fields_to_progs:
self._log.warning(u'no fields, nothing to do')
return False
def write_event(self, item, path, tags):
"""Set values in tags to `None` if the key and value are matched
by `self.patterns`.
"""
if not self.patterns:
self._log.warn(u'no fields, nothing to do')
return
for field, patterns in self.patterns.items():
for field, progs in self.fields_to_progs.items():
if field in tags:
value = tags[field]
match = self.match_patterns(tags[field], patterns)
match = _match_progs(tags[field], progs)
else:
value = ''
match = patterns is True
match = not progs
if match:
fields_set = True
self._log.debug(u'{0}: {1} -> None', field, value)
tags[field] = None
if self.config['update_database']:
item[field] = None
return fields_set
def process_item(self, item):
tags = dict(item)
if self.set_fields(item, tags):
item.write(tags=tags)
if self.config['update_database']:
item.store(fields=tags)
def _match_progs(value, progs):
"""Check if `value` (as string) is matching any of the compiled regexes in
the `progs` list.
"""
if not progs:
return True
for prog in progs:
if prog.search(six.text_type(value)):
return True
return False

BIN
libs/bin/beet.exe Normal file

Binary file not shown.

16
libs/bin/mid3cp Normal file
View file

@ -0,0 +1,16 @@
#!h:\src\env\nzbtomedia\scripts\python.exe
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys

# Thin launcher script: the actual tool implementation lives in
# mutagen's bundled _tools package.
from mutagen._tools.mid3cp import entry_point

if __name__ == "__main__":
    # Propagate the tool's exit status to the shell.
    sys.exit(entry_point())

16
libs/bin/mid3iconv Normal file
View file

@ -0,0 +1,16 @@
#!h:\src\env\nzbtomedia\scripts\python.exe
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys

# Thin launcher script: the actual tool implementation lives in
# mutagen's bundled _tools package.
from mutagen._tools.mid3iconv import entry_point

if __name__ == "__main__":
    # Propagate the tool's exit status to the shell.
    sys.exit(entry_point())

16
libs/bin/mid3v2 Normal file
View file

@ -0,0 +1,16 @@
#!h:\src\env\nzbtomedia\scripts\python.exe
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys

# Thin launcher script: the actual tool implementation lives in
# mutagen's bundled _tools package.
from mutagen._tools.mid3v2 import entry_point

if __name__ == "__main__":
    # Propagate the tool's exit status to the shell.
    sys.exit(entry_point())

16
libs/bin/moggsplit Normal file
View file

@ -0,0 +1,16 @@
#!h:\src\env\nzbtomedia\scripts\python.exe
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys

# Thin launcher script: the actual tool implementation lives in
# mutagen's bundled _tools package.
from mutagen._tools.moggsplit import entry_point

if __name__ == "__main__":
    # Propagate the tool's exit status to the shell.
    sys.exit(entry_point())

16
libs/bin/mutagen-inspect Normal file
View file

@ -0,0 +1,16 @@
#!h:\src\env\nzbtomedia\scripts\python.exe
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys

# Thin launcher script: the actual tool implementation lives in
# mutagen's bundled _tools package.
from mutagen._tools.mutagen_inspect import entry_point

if __name__ == "__main__":
    # Propagate the tool's exit status to the shell.
    sys.exit(entry_point())

16
libs/bin/mutagen-pony Normal file
View file

@ -0,0 +1,16 @@
#!h:\src\env\nzbtomedia\scripts\python.exe
# -*- coding: utf-8 -*-
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys

# Thin launcher script: the actual tool implementation lives in
# mutagen's bundled _tools package.
from mutagen._tools.mutagen_pony import entry_point

if __name__ == "__main__":
    # Propagate the tool's exit status to the shell.
    sys.exit(entry_point())

BIN
libs/bin/unidecode.exe Normal file

Binary file not shown.

View file

@ -3,5 +3,4 @@ from .initialise import init, deinit, reinit, colorama_text
from .ansi import Fore, Back, Style, Cursor
from .ansitowin32 import AnsiToWin32
__version__ = '0.3.7'
__version__ = '0.4.1'

View file

@ -13,14 +13,6 @@ if windll is not None:
winterm = WinTerm()
def is_stream_closed(stream):
return not hasattr(stream, 'closed') or stream.closed
def is_a_tty(stream):
return hasattr(stream, 'isatty') and stream.isatty()
class StreamWrapper(object):
'''
Wraps a stream (such as stdout), acting as a transparent proxy for all
@ -36,9 +28,38 @@ class StreamWrapper(object):
def __getattr__(self, name):
return getattr(self.__wrapped, name)
def __enter__(self, *args, **kwargs):
# special method lookup bypasses __getattr__/__getattribute__, see
# https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit
# thus, contextlib magic methods are not proxied via __getattr__
return self.__wrapped.__enter__(*args, **kwargs)
def __exit__(self, *args, **kwargs):
return self.__wrapped.__exit__(*args, **kwargs)
def write(self, text):
self.__convertor.write(text)
def isatty(self):
stream = self.__wrapped
if 'PYCHARM_HOSTED' in os.environ:
if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__):
return True
try:
stream_isatty = stream.isatty
except AttributeError:
return False
else:
return stream_isatty()
@property
def closed(self):
stream = self.__wrapped
try:
return stream.closed
except AttributeError:
return True
class AnsiToWin32(object):
'''
@ -46,8 +67,8 @@ class AnsiToWin32(object):
sequences from the text, and if outputting to a tty, will convert them into
win32 function calls.
'''
ANSI_CSI_RE = re.compile('\001?\033\[((?:\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
ANSI_OSC_RE = re.compile('\001?\033\]((?:.|;)*?)(\x07)\002?') # Operating System Command
ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
ANSI_OSC_RE = re.compile('\001?\033\\]((?:.|;)*?)(\x07)\002?') # Operating System Command
def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
# The wrapped stream (normally sys.stdout or sys.stderr)
@ -68,12 +89,12 @@ class AnsiToWin32(object):
# should we strip ANSI sequences from our output?
if strip is None:
strip = conversion_supported or (not is_stream_closed(wrapped) and not is_a_tty(wrapped))
strip = conversion_supported or (not self.stream.closed and not self.stream.isatty())
self.strip = strip
# should we should convert ANSI sequences into win32 calls?
if convert is None:
convert = conversion_supported and not is_stream_closed(wrapped) and is_a_tty(wrapped)
convert = conversion_supported and not self.stream.closed and self.stream.isatty()
self.convert = convert
# dict of ansi codes to win32 functions and parameters
@ -149,7 +170,7 @@ class AnsiToWin32(object):
def reset_all(self):
if self.convert:
self.call_win32('m', (0,))
elif not self.strip and not is_stream_closed(self.wrapped):
elif not self.strip and not self.stream.closed:
self.wrapped.write(Style.RESET_ALL)

View file

@ -78,5 +78,3 @@ def wrap_stream(stream, convert, strip, autoreset, wrap):
if wrapper.should_wrap():
stream = wrapper.stream
return stream

View file

@ -83,33 +83,31 @@ else:
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
_SetConsoleTitleW = windll.kernel32.SetConsoleTitleA
_SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
_SetConsoleTitleW.argtypes = [
wintypes.LPCSTR
wintypes.LPCWSTR
]
_SetConsoleTitleW.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
def winapi_test():
handle = handles[STDOUT]
def _winapi_test(handle):
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return bool(success)
def winapi_test():
return any(_winapi_test(h) for h in
(_GetStdHandle(STDOUT), _GetStdHandle(STDERR)))
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id]
handle = _GetStdHandle(stream_id)
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
handle = _GetStdHandle(stream_id)
return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position, adjust=True):
@ -127,11 +125,11 @@ else:
adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left
# Resume normal processing
handle = handles[stream_id]
handle = _GetStdHandle(stream_id)
return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
handle = _GetStdHandle(stream_id)
char = c_char(char.encode())
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
@ -142,7 +140,7 @@ else:
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = handles[stream_id]
handle = _GetStdHandle(stream_id)
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)

View file

@ -44,6 +44,7 @@ class WinTerm(object):
def reset_all(self, on_stderr=None):
self.set_attrs(self._default)
self.set_console(attrs=self._default)
self._light = 0
def fore(self, fore=None, light=False, on_stderr=False):
if fore is None:
@ -122,12 +123,15 @@ class WinTerm(object):
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = cells_in_screen - cells_before_cursor
if mode == 1:
elif mode == 1:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_before_cursor
elif mode == 2:
from_coord = win32.COORD(0, 0)
cells_to_erase = cells_in_screen
else:
# invalid mode
return
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly
@ -147,12 +151,15 @@ class WinTerm(object):
if mode == 0:
from_coord = csbi.dwCursorPosition
cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
if mode == 1:
elif mode == 1:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwCursorPosition.X
elif mode == 2:
from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
cells_to_erase = csbi.dwSize.X
else:
# invalid mode
return
# fill the entire screen with blanks
win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
# now set the buffer's attributes accordingly

View file

@ -1,4 +1,6 @@
try:
from .cjellyfish import * # noqa
library = "C"
except ImportError:
from ._jellyfish import * # noqa
library = "Python"

View file

@ -1,6 +1,6 @@
import unicodedata
from collections import defaultdict
from .compat import _range, _zip_longest, _no_bytes_err
from .compat import _range, _zip_longest, IS_PY3
from .porter import Stemmer
@ -8,9 +8,16 @@ def _normalize(s):
return unicodedata.normalize('NFKD', s)
def _check_type(s):
if IS_PY3 and not isinstance(s, str):
raise TypeError('expected str or unicode, got %s' % type(s).__name__)
elif not IS_PY3 and not isinstance(s, unicode):
raise TypeError('expected unicode, got %s' % type(s).__name__)
def levenshtein_distance(s1, s2):
if isinstance(s1, bytes) or isinstance(s2, bytes):
raise TypeError(_no_bytes_err)
_check_type(s1)
_check_type(s2)
if s1 == s2:
return 0
@ -36,14 +43,14 @@ def levenshtein_distance(s1, s2):
def _jaro_winkler(ying, yang, long_tolerance, winklerize):
if isinstance(ying, bytes) or isinstance(yang, bytes):
raise TypeError(_no_bytes_err)
_check_type(ying)
_check_type(yang)
ying_len = len(ying)
yang_len = len(yang)
if not ying_len or not yang_len:
return 0
return 0.0
min_len = max(ying_len, yang_len)
search_range = (min_len // 2) - 1
@ -66,7 +73,7 @@ def _jaro_winkler(ying, yang, long_tolerance, winklerize):
# short circuit if no characters match
if not common_chars:
return 0
return 0.0
# count transpositions
k = trans_count = 0
@ -106,8 +113,8 @@ def _jaro_winkler(ying, yang, long_tolerance, winklerize):
def damerau_levenshtein_distance(s1, s2):
if isinstance(s1, bytes) or isinstance(s2, bytes):
raise TypeError(_no_bytes_err)
_check_type(s1)
_check_type(s2)
len1 = len(s1)
len2 = len(s2)
@ -155,25 +162,27 @@ def jaro_winkler(s1, s2, long_tolerance=False):
def soundex(s):
_check_type(s)
if not s:
return s
if isinstance(s, bytes):
raise TypeError(_no_bytes_err)
return ''
s = _normalize(s)
s = s.upper()
replacements = (('bfpv', '1'),
('cgjkqsxz', '2'),
('dt', '3'),
('l', '4'),
('mn', '5'),
('r', '6'))
replacements = (('BFPV', '1'),
('CGJKQSXZ', '2'),
('DT', '3'),
('L', '4'),
('MN', '5'),
('R', '6'))
result = [s[0]]
count = 1
# find would-be replacment for first character
for lset, sub in replacements:
if s[0].lower() in lset:
if s[0] in lset:
last = sub
break
else:
@ -181,7 +190,7 @@ def soundex(s):
for letter in s[1:]:
for lset, sub in replacements:
if letter.lower() in lset:
if letter in lset:
if sub != last:
result.append(sub)
count += 1
@ -197,8 +206,8 @@ def soundex(s):
def hamming_distance(s1, s2):
if isinstance(s1, bytes) or isinstance(s2, bytes):
raise TypeError(_no_bytes_err)
_check_type(s1)
_check_type(s2)
# ensure length of s1 >= s2
if len(s2) > len(s1):
@ -214,8 +223,9 @@ def hamming_distance(s1, s2):
def nysiis(s):
if isinstance(s, bytes):
raise TypeError(_no_bytes_err)
_check_type(s)
if not s:
return ''
@ -303,8 +313,8 @@ def nysiis(s):
def match_rating_codex(s):
if isinstance(s, bytes):
raise TypeError(_no_bytes_err)
_check_type(s)
s = s.upper()
codex = []
@ -368,8 +378,7 @@ def match_rating_comparison(s1, s2):
def metaphone(s):
if isinstance(s, bytes):
raise TypeError(_no_bytes_err)
_check_type(s)
result = []
@ -457,8 +466,9 @@ def metaphone(s):
elif c == 'w':
if i == 0 and next == 'h':
i += 1
next = s[i+1]
if next in 'aeiou':
if nextnext in 'aeiou' or nextnext == '*****':
result.append('w')
elif next in 'aeiou' or next == '*****':
result.append('w')
elif c == 'x':
if i == 0:
@ -484,6 +494,6 @@ def metaphone(s):
def porter_stem(s):
if isinstance(s, bytes):
raise TypeError(_no_bytes_err)
_check_type(s)
return Stemmer(s).stem()

Some files were not shown because too many files have changed in this diff Show more