diff --git a/.gitignore b/.gitignore
index 2f3df66af..40e3e1a7e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@
.svn
.tox
.coverage
+.idea
# file patterns
@@ -22,6 +23,7 @@
*.project
*.pydevproject
*.ropeproject
+*.orig
# Project Specific patterns
diff --git a/.hgignore b/.hgignore
index a500e2d9e..33acb4da3 100644
--- a/.hgignore
+++ b/.hgignore
@@ -4,3 +4,4 @@
^MANIFEST$
^docs/_build/
^\.tox/
+^\.idea/
diff --git a/beets/__init__.py b/beets/__init__.py
index a2a0bfbd5..c912785d5 100644
--- a/beets/__init__.py
+++ b/beets/__init__.py
@@ -12,7 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
-__version__ = '1.3.8'
+__version__ = '1.3.9'
__author__ = 'Adrian Sampson '
import beets.library
diff --git a/beets/autotag/match.py b/beets/autotag/match.py
index aa0c21dba..2d1f20074 100644
--- a/beets/autotag/match.py
+++ b/beets/autotag/match.py
@@ -65,10 +65,10 @@ def current_metadata(items):
fields = ['artist', 'album', 'albumartist', 'year', 'disctotal',
'mb_albumid', 'label', 'catalognum', 'country', 'media',
'albumdisambig']
- for key in fields:
- values = [getattr(item, key) for item in items if item]
- likelies[key], freq = plurality(values)
- consensus[key] = (freq == len(values))
+ for field in fields:
+ values = [item[field] for item in items if item]
+ likelies[field], freq = plurality(values)
+ consensus[field] = (freq == len(values))
# If there's an album artist consensus, use this for the artist.
if consensus['albumartist'] and likelies['albumartist']:
@@ -261,16 +261,16 @@ def match_by_id(items):
# Is there a consensus on the MB album ID?
albumids = [item.mb_albumid for item in items if item.mb_albumid]
if not albumids:
- log.debug('No album IDs found.')
+ log.debug(u'No album IDs found.')
return None
# If all album IDs are equal, look up the album.
if bool(reduce(lambda x, y: x if x == y else (), albumids)):
albumid = albumids[0]
- log.debug('Searching for discovered album ID: ' + albumid)
+ log.debug(u'Searching for discovered album ID: {0}'.format(albumid))
return hooks.album_for_mbid(albumid)
else:
- log.debug('No album ID consensus.')
+ log.debug(u'No album ID consensus.')
def _recommendation(results):
@@ -330,7 +330,7 @@ def _add_candidate(items, results, info):
checking the track count, ordering the items, checking for
duplicates, and calculating the distance.
"""
- log.debug('Candidate: %s - %s' % (info.artist, info.album))
+ log.debug(u'Candidate: {0} - {1}'.format(info.artist, info.album))
# Discard albums with zero tracks.
if not info.tracks:
@@ -339,13 +339,13 @@ def _add_candidate(items, results, info):
# Don't duplicate.
if info.album_id in results:
- log.debug('Duplicate.')
+ log.debug(u'Duplicate.')
return
# Discard matches without required tags.
for req_tag in config['match']['required'].as_str_seq():
if getattr(info, req_tag) is None:
- log.debug('Ignored. Missing required tag: %s' % req_tag)
+ log.debug(u'Ignored. Missing required tag: {0}'.format(req_tag))
return
# Find mapping between the items and the track info.
@@ -358,31 +358,36 @@ def _add_candidate(items, results, info):
penalties = [key for _, key in dist]
for penalty in config['match']['ignored'].as_str_seq():
if penalty in penalties:
- log.debug('Ignored. Penalty: %s' % penalty)
+ log.debug(u'Ignored. Penalty: {0}'.format(penalty))
return
- log.debug('Success. Distance: %f' % dist)
+ log.debug(u'Success. Distance: {0}'.format(dist))
results[info.album_id] = hooks.AlbumMatch(dist, info, mapping,
extra_items, extra_tracks)
def tag_album(items, search_artist=None, search_album=None,
search_id=None):
- """Bundles together the functionality used to infer tags for a
- set of items comprised by an album. Returns everything relevant:
- - The current artist.
- - The current album.
- - A list of AlbumMatch objects. The candidates are sorted by
- distance (i.e., best match first).
- - A :class:`Recommendation`.
- If search_artist and search_album or search_id are provided, then
- they are used as search terms in place of the current metadata.
+ """Return a tuple of a artist name, an album name, a list of
+ `AlbumMatch` candidates from the metadata backend, and a
+ `Recommendation`.
+
+ The artist and album are the most common values of these fields
+ among `items`.
+
+ The `AlbumMatch` objects are generated by searching the metadata
+ backends. By default, the metadata of the items is used for the
+ search. This can be customized by setting the parameters. The
+    `mapping` field of each candidate has the matched `items` as keys.
+
+    The recommendation is calculated from the match quality of the
+ candidates.
"""
# Get current metadata.
likelies, consensus = current_metadata(items)
cur_artist = likelies['artist']
cur_album = likelies['album']
- log.debug('Tagging %s - %s' % (cur_artist, cur_album))
+ log.debug(u'Tagging {0} - {1}'.format(cur_artist, cur_album))
# The output result (distance, AlbumInfo) tuples (keyed by MB album
# ID).
@@ -390,7 +395,7 @@ def tag_album(items, search_artist=None, search_album=None,
# Search by explicit ID.
if search_id is not None:
- log.debug('Searching for album ID: ' + search_id)
+ log.debug(u'Searching for album ID: {0}'.format(search_id))
search_cands = hooks.albums_for_id(search_id)
# Use existing metadata or text search.
@@ -400,32 +405,33 @@ def tag_album(items, search_artist=None, search_album=None,
if id_info:
_add_candidate(items, candidates, id_info)
rec = _recommendation(candidates.values())
- log.debug('Album ID match recommendation is ' + str(rec))
+ log.debug(u'Album ID match recommendation is {0}'.format(str(rec)))
if candidates and not config['import']['timid']:
# If we have a very good MBID match, return immediately.
# Otherwise, this match will compete against metadata-based
# matches.
if rec == Recommendation.strong:
- log.debug('ID match.')
+ log.debug(u'ID match.')
return cur_artist, cur_album, candidates.values(), rec
# Search terms.
if not (search_artist and search_album):
# No explicit search terms -- use current metadata.
search_artist, search_album = cur_artist, cur_album
- log.debug(u'Search terms: %s - %s' % (search_artist, search_album))
+ log.debug(u'Search terms: {0} - {1}'.format(search_artist,
+ search_album))
# Is this album likely to be a "various artist" release?
va_likely = ((not consensus['artist']) or
(search_artist.lower() in VA_ARTISTS) or
any(item.comp for item in items))
- log.debug(u'Album might be VA: %s' % str(va_likely))
+ log.debug(u'Album might be VA: {0}'.format(str(va_likely)))
# Get the results from the data sources.
search_cands = hooks.album_candidates(items, search_artist,
search_album, va_likely)
- log.debug(u'Evaluating %i candidates.' % len(search_cands))
+ log.debug(u'Evaluating {0} candidates.'.format(len(search_cands)))
for info in search_cands:
_add_candidate(items, candidates, info)
@@ -450,7 +456,7 @@ def tag_item(item, search_artist=None, search_title=None,
# First, try matching by MusicBrainz ID.
trackid = search_id or item.mb_trackid
if trackid:
- log.debug('Searching for track ID: ' + trackid)
+ log.debug(u'Searching for track ID: {0}'.format(trackid))
for track_info in hooks.tracks_for_id(trackid):
dist = track_distance(item, track_info, incl_artist=True)
candidates[track_info.track_id] = \
@@ -458,7 +464,7 @@ def tag_item(item, search_artist=None, search_title=None,
# If this is a good match, then don't keep searching.
rec = _recommendation(candidates.values())
if rec == Recommendation.strong and not config['import']['timid']:
- log.debug('Track ID match.')
+ log.debug(u'Track ID match.')
return candidates.values(), rec
# If we're searching by ID, don't proceed.
@@ -471,7 +477,8 @@ def tag_item(item, search_artist=None, search_title=None,
# Search terms.
if not (search_artist and search_title):
search_artist, search_title = item.artist, item.title
- log.debug(u'Item search terms: %s - %s' % (search_artist, search_title))
+ log.debug(u'Item search terms: {0} - {1}'.format(search_artist,
+ search_title))
# Get and evaluate candidate metadata.
for track_info in hooks.item_candidates(item, search_artist, search_title):
@@ -479,7 +486,7 @@ def tag_item(item, search_artist=None, search_title=None,
candidates[track_info.track_id] = hooks.TrackMatch(dist, track_info)
# Sort by distance and return with recommendation.
- log.debug('Found %i candidates.' % len(candidates))
+ log.debug(u'Found {0} candidates.'.format(len(candidates)))
candidates = sorted(candidates.itervalues())
rec = _recommendation(candidates)
return candidates, rec
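# --- Illustrative sketch (not part of the patch) ---------------------------
# A standalone rendering of the per-field consensus logic used by
# current_metadata() above. `plurality` here is a Counter-based stand-in
# for the helper the real module imports; the item dicts are made up.
from collections import Counter

def plurality(values):
    # Return the most common value and how often it occurs.
    value, freq = Counter(values).most_common(1)[0]
    return value, freq

items = [{'artist': u'The Beatles'}, {'artist': u'The Beatles'},
         {'artist': u'Various Artists'}]
values = [item['artist'] for item in items if item]
likely, freq = plurality(values)
consensus = (freq == len(values))   # False here: the field is not unanimous
# ---------------------------------------------------------------------------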
diff --git a/beets/autotag/mb.py b/beets/autotag/mb.py
index 508a5a43a..d7afbc52b 100644
--- a/beets/autotag/mb.py
+++ b/beets/autotag/mb.py
@@ -372,13 +372,13 @@ def album_for_id(releaseid):
"""
albumid = _parse_id(releaseid)
if not albumid:
- log.debug('Invalid MBID (%s).' % (releaseid))
+ log.debug(u'Invalid MBID ({0}).'.format(releaseid))
return
try:
res = musicbrainzngs.get_release_by_id(albumid,
RELEASE_INCLUDES)
except musicbrainzngs.ResponseError:
- log.debug('Album ID match failed.')
+ log.debug(u'Album ID match failed.')
return None
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'get release by ID', albumid,
@@ -392,12 +392,12 @@ def track_for_id(releaseid):
"""
trackid = _parse_id(releaseid)
if not trackid:
- log.debug('Invalid MBID (%s).' % (releaseid))
+ log.debug(u'Invalid MBID ({0}).'.format(releaseid))
return
try:
res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES)
except musicbrainzngs.ResponseError:
- log.debug('Track ID match failed.')
+ log.debug(u'Track ID match failed.')
return None
except musicbrainzngs.MusicBrainzError as exc:
raise MusicBrainzAPIError(exc, 'get recording by ID', trackid,
diff --git a/beets/config_default.yaml b/beets/config_default.yaml
index 1854b103d..47afe70ce 100644
--- a/beets/config_default.yaml
+++ b/beets/config_default.yaml
@@ -56,8 +56,8 @@ list_format_item: $artist - $album - $title
list_format_album: $albumartist - $album
time_format: '%Y-%m-%d %H:%M:%S'
-sort_album: smartartist+
-sort_item: smartartist+
+sort_album: albumartist+ album+
+sort_item: artist+ album+ disc+ track+
paths:
default: $albumartist/$album%aunique{}/$track $title
diff --git a/beets/dbcore/__init__.py b/beets/dbcore/__init__.py
index fdf6b4695..c364fdfc3 100644
--- a/beets/dbcore/__init__.py
+++ b/beets/dbcore/__init__.py
@@ -20,5 +20,6 @@ from .query import Query, FieldQuery, MatchQuery, AndQuery, OrQuery
from .types import Type
from .queryparse import query_from_strings
from .queryparse import sort_from_strings
+from .queryparse import parse_sorted_query
# flake8: noqa
diff --git a/beets/dbcore/db.py b/beets/dbcore/db.py
index 0ec24dfd6..5392688e0 100644
--- a/beets/dbcore/db.py
+++ b/beets/dbcore/db.py
@@ -24,8 +24,8 @@ import collections
import beets
from beets.util.functemplate import Template
-from .query import MatchQuery, NullSort
-from .types import BASE_TYPE
+from beets.dbcore import types
+from .query import MatchQuery, NullSort, TrueQuery
class FormattedMapping(collections.Mapping):
@@ -115,11 +115,6 @@ class Model(object):
keys are field names and the values are `Type` objects.
"""
- _bytes_keys = ()
- """Keys whose values should be stored as raw bytes blobs rather than
- strings.
- """
-
_search_fields = ()
"""The fields that should be queried by default by unqualified query
terms.
@@ -129,6 +124,11 @@ class Model(object):
"""Optional Types for non-fixed (i.e., flexible and computed) fields.
"""
+ _sorts = {}
+ """Optional named sort criteria. The keys are strings and the values
+ are subclasses of `Sort`.
+ """
+
@classmethod
def _getters(cls):
"""Return a mapping from field names to getter functions.
@@ -160,21 +160,17 @@ class Model(object):
self.clear_dirty()
@classmethod
- def _awaken(cls, db=None, fixed_values=None, flex_values=None):
+ def _awaken(cls, db=None, fixed_values={}, flex_values={}):
"""Create an object with values drawn from the database.
This is a performance optimization: the checks involved with
ordinary construction are bypassed.
"""
obj = cls(db)
- if fixed_values:
- for key, value in fixed_values.items():
- obj._values_fixed[key] = cls._fields[key].normalize(value)
- if flex_values:
- for key, value in flex_values.items():
- if key in cls._types:
- value = cls._types[key].normalize(value)
- obj._values_flex[key] = value
+ for key, value in fixed_values.iteritems():
+ obj._values_fixed[key] = cls._type(key).from_sql(value)
+ for key, value in flex_values.iteritems():
+ obj._values_flex[key] = cls._type(key).from_sql(value)
return obj
def __repr__(self):
@@ -208,7 +204,7 @@ class Model(object):
If the field has no explicit type, it is given the base `Type`,
which does no conversion.
"""
- return self._fields.get(key) or self._types.get(key) or BASE_TYPE
+ return self._fields.get(key) or self._types.get(key) or types.DEFAULT
def __getitem__(self, key):
"""Get the value for a field. Raise a KeyError if the field is
@@ -332,19 +328,15 @@ class Model(object):
self._check_db()
# Build assignments for query.
- assignments = ''
+ assignments = []
subvars = []
for key in self._fields:
if key != 'id' and key in self._dirty:
self._dirty.remove(key)
- assignments += key + '=?,'
- value = self[key]
- # Wrap path strings in buffers so they get stored
- # "in the raw".
- if key in self._bytes_keys and isinstance(value, str):
- value = buffer(value)
+ assignments.append(key + '=?')
+ value = self._type(key).to_sql(self[key])
subvars.append(value)
- assignments = assignments[:-1] # Knock off last ,
+ assignments = ','.join(assignments)
with self._db.transaction() as tx:
# Main table update.
@@ -737,7 +729,7 @@ class Database(object):
id INTEGER PRIMARY KEY,
entity_id INTEGER,
key TEXT,
- value NONE,
+ value TEXT,
UNIQUE(entity_id, key) ON CONFLICT REPLACE);
CREATE INDEX IF NOT EXISTS {0}_by_entity
ON {0} (entity_id);
@@ -745,14 +737,15 @@ class Database(object):
# Querying.
- def _fetch(self, model_cls, query, sort=None):
+ def _fetch(self, model_cls, query=None, sort=None):
"""Fetch the objects of type `model_cls` matching the given
query. The query may be given as a string, string sequence, a
Query object, or None (to fetch everything). `sort` is an
- optional Sort object.
+        optional `Sort` object.
"""
+ query = query or TrueQuery() # A null query.
+ sort = sort or NullSort() # Unsorted.
where, subvals = query.clause()
- sort = sort or NullSort()
order_by = sort.order_clause()
sql = ("SELECT * FROM {0} WHERE {1} {2}").format(
diff --git a/beets/dbcore/query.py b/beets/dbcore/query.py
index 1f1a9a26a..5a116eb2b 100644
--- a/beets/dbcore/query.py
+++ b/beets/dbcore/query.py
@@ -18,10 +18,6 @@ import re
from operator import attrgetter
from beets import util
from datetime import datetime, timedelta
-from collections import namedtuple
-
-
-SortedQuery = namedtuple('SortedQuery', ['query', 'sort'])
class Query(object):
@@ -87,6 +83,23 @@ class MatchQuery(FieldQuery):
return pattern == value
+class NoneQuery(FieldQuery):
+
+ def __init__(self, field, fast=True):
+ self.field = field
+ self.fast = fast
+
+ def col_clause(self):
+ return self.field + " IS NULL", ()
+
+ @classmethod
+ def match(self, item):
+ try:
+ return item[self.field] is None
+ except KeyError:
+ return True
+
+
class StringFieldQuery(FieldQuery):
"""A FieldQuery that converts values to strings before matching
them.
@@ -405,10 +418,14 @@ class Period(object):
return None
ordinal = string.count('-')
if ordinal >= len(cls.date_formats):
- raise ValueError('date is not in one of the formats '
- + ', '.join(cls.date_formats))
+ # Too many components.
+ return None
date_format = cls.date_formats[ordinal]
- date = datetime.strptime(string, date_format)
+ try:
+ date = datetime.strptime(string, date_format)
+ except ValueError:
+ # Parsing failed.
+ return None
precision = cls.precisions[ordinal]
return cls(date, precision)
@@ -623,25 +640,6 @@ class FixedFieldSort(FieldSort):
return "{0} {1}".format(self.field, order)
-class SmartArtistSort(Sort):
- """Sort by artist (either album artist or track artist),
- prioritizing the sort field over the raw field.
- """
- def __init__(self, model_cls, is_ascending=True):
- self.model_cls = model_cls
- self.is_ascending = is_ascending
-
- def order_clause(self):
- order = "ASC" if self.is_ascending else "DESC"
- if 'albumartist' in self.model_cls._fields:
- field = 'albumartist'
- else:
- field = 'artist'
- return ('(CASE {0}_sort WHEN NULL THEN {0} '
- 'WHEN "" THEN {0} '
- 'ELSE {0}_sort END) {1}').format(field, order)
-
-
class SlowFieldSort(FieldSort):
"""A sort criterion by some model field other than a fixed field:
i.e., a computed or flexible field.
@@ -654,6 +652,3 @@ class NullSort(Sort):
"""No sorting. Leave results unsorted."""
def sort(items):
return items
-
- def __nonzero__(self):
- return False
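# --- Illustrative sketch (not part of the patch) ---------------------------
# A standalone rendering of the more forgiving date parsing added to
# Period.parse() above: strings with too many components or that fail
# strptime() now yield None instead of raising. The formats mirror
# Period.date_formats (year, month, day precision).
from datetime import datetime

date_formats = ('%Y', '%Y-%m', '%Y-%m-%d')

def parse_period_date(string):
    ordinal = string.count('-')
    if ordinal >= len(date_formats):
        return None                           # too many components
    try:
        return datetime.strptime(string, date_formats[ordinal])
    except ValueError:
        return None                           # parsing failed

# parse_period_date('2014-02')    -> datetime(2014, 2, 1, 0, 0)
# parse_period_date('not-a-date') -> None
# ---------------------------------------------------------------------------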
diff --git a/beets/dbcore/queryparse.py b/beets/dbcore/queryparse.py
index 8a50e04d5..90963696b 100644
--- a/beets/dbcore/queryparse.py
+++ b/beets/dbcore/queryparse.py
@@ -136,11 +136,10 @@ def construct_sort_part(model_cls, part):
assert direction in ('+', '-'), "part must end with + or -"
is_ascending = direction == '+'
- if field in model_cls._fields:
+ if field in model_cls._sorts:
+ sort = model_cls._sorts[field](model_cls, is_ascending)
+ elif field in model_cls._fields:
sort = query.FixedFieldSort(field, is_ascending)
- elif field == 'smartartist':
- # Special case for smart artist sort.
- sort = query.SmartArtistSort(model_cls, is_ascending)
else:
# Flexible or computed.
sort = query.SlowFieldSort(field, is_ascending)
@@ -157,3 +156,25 @@ def sort_from_strings(model_cls, sort_parts):
for part in sort_parts:
sort.add_sort(construct_sort_part(model_cls, part))
return sort
+
+
+def parse_sorted_query(model_cls, parts, prefixes={},
+ query_cls=query.AndQuery):
+ """Given a list of strings, create the `Query` and `Sort` that they
+ represent.
+ """
+ # Separate query token and sort token.
+ query_parts = []
+ sort_parts = []
+ for part in parts:
+ if part.endswith((u'+', u'-')) and u':' not in part:
+ sort_parts.append(part)
+ else:
+ query_parts.append(part)
+
+ # Parse each.
+ q = query_from_strings(
+ query_cls, model_cls, prefixes, query_parts
+ )
+ s = sort_from_strings(model_cls, sort_parts)
+ return q, s
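# --- Illustrative sketch (not part of the patch) ---------------------------
# A standalone rendering of the token split performed by
# parse_sorted_query() above: a part ending in '+' or '-' with no ':' is a
# sort term, everything else is a query term. The query/sort construction
# from model_cls and prefixes is omitted here.
def split_query_and_sort(parts):
    query_parts, sort_parts = [], []
    for part in parts:
        if part.endswith((u'+', u'-')) and u':' not in part:
            sort_parts.append(part)
        else:
            query_parts.append(part)
    return query_parts, sort_parts

# split_query_and_sort([u'beatles', u'year+', u'album+'])
# -> ([u'beatles'], [u'year+', u'album+'])
# ---------------------------------------------------------------------------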
diff --git a/beets/dbcore/types.py b/beets/dbcore/types.py
index 3c56121d0..82346e704 100644
--- a/beets/dbcore/types.py
+++ b/beets/dbcore/types.py
@@ -34,30 +34,42 @@ class Type(object):
"""The `Query` subclass to be used when querying the field.
"""
- null = None
- """The value to be exposed when the underlying value is None.
+ model_type = unicode
+ """The Python type that is used to represent the value in the model.
+
+ The model is guaranteed to return a value of this type if the field
+ is accessed. To this end, the constructor is used by the `normalize`
+ and `from_sql` methods and the `default` property.
"""
+ @property
+ def null(self):
+ """The value to be exposed when the underlying value is None.
+ """
+ return self.model_type()
+
def format(self, value):
"""Given a value of this type, produce a Unicode string
representing the value. This is used in template evaluation.
"""
- # Fallback formatter. Convert to Unicode at all cost.
if value is None:
- return u''
- elif isinstance(value, basestring):
- if isinstance(value, bytes):
- return value.decode('utf8', 'ignore')
- else:
- return value
- else:
- return unicode(value)
+ value = self.null
+ # `self.null` might be `None`
+ if value is None:
+ value = u''
+ if isinstance(value, bytes):
+ value = value.decode('utf8', 'ignore')
+
+ return unicode(value)
def parse(self, string):
"""Parse a (possibly human-written) string and return the
indicated value of this type.
"""
- return string
+ try:
+ return self.model_type(string)
+ except ValueError:
+ return self.null
def normalize(self, value):
"""Given a value that will be assigned into a field of this
@@ -67,26 +79,50 @@ class Type(object):
if value is None:
return self.null
else:
+ # TODO This should eventually be replaced by
+ # `self.model_type(value)`
return value
+ def from_sql(self, sql_value):
+ """Receives the value stored in the SQL backend and return the
+ value to be stored in the model.
+
+        For fixed fields, the type of `sql_value` is determined by the column
+ type affinity given in the `sql` property and the SQL to Python
+ mapping of the database adapter. For more information see:
+ http://www.sqlite.org/datatype3.html
+ https://docs.python.org/2/library/sqlite3.html#sqlite-and-python-types
+
+        Flexible fields have the type affinity `TEXT`. This means the
+        `sql_value` is either a `buffer` or a `unicode` object and the
+        method must handle both in addition.
+ """
+ if isinstance(sql_value, buffer):
+ sql_value = bytes(sql_value).decode('utf8', 'ignore')
+ if isinstance(sql_value, unicode):
+ return self.parse(sql_value)
+ else:
+ return self.normalize(sql_value)
+
+ def to_sql(self, model_value):
+ """Convert a value as stored in the model object to a value used
+ by the database adapter.
+ """
+ return model_value
+
# Reusable types.
+class Default(Type):
+ null = None
+
+
class Integer(Type):
"""A basic integer type.
"""
sql = u'INTEGER'
query = query.NumericQuery
- null = 0
-
- def format(self, value):
- return unicode(value or 0)
-
- def parse(self, string):
- try:
- return int(string)
- except ValueError:
- return 0
+ model_type = int
class PaddedInt(Integer):
@@ -128,17 +164,11 @@ class Float(Type):
"""
sql = u'REAL'
query = query.NumericQuery
- null = 0.0
+ model_type = float
def format(self, value):
return u'{0:.1f}'.format(value or 0.0)
- def parse(self, string):
- try:
- return float(string)
- except ValueError:
- return 0.0
-
class NullFloat(Float):
"""Same as `Float`, but does not normalize `None` to `0.0`.
@@ -151,13 +181,6 @@ class String(Type):
"""
sql = u'TEXT'
query = query.SubstringQuery
- null = u''
-
- def format(self, value):
- return unicode(value) if value else u''
-
- def parse(self, string):
- return string
class Boolean(Type):
@@ -165,7 +188,7 @@ class Boolean(Type):
"""
sql = u'INTEGER'
query = query.BooleanQuery
- null = False
+ model_type = bool
def format(self, value):
return unicode(bool(value))
@@ -175,7 +198,7 @@ class Boolean(Type):
# Shared instances of common types.
-BASE_TYPE = Type()
+DEFAULT = Default()
INTEGER = Integer()
PRIMARY_ID = Id(True)
FOREIGN_ID = Id(False)
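# --- Illustrative sketch (not part of the patch) ---------------------------
# A standalone rendering of the model_type contract introduced above,
# written with Python 3 str/bytes where the original handles
# unicode/buffer. parse() falls back to null (the model_type's default
# value) on bad input, and from_sql() routes text through parse() and
# everything else through normalize(). This mini class is only a sketch,
# not the beets Integer type itself.
class MiniInteger(object):
    model_type = int

    @property
    def null(self):
        return self.model_type()          # int() == 0

    def parse(self, string):
        try:
            return self.model_type(string)
        except ValueError:
            return self.null

    def normalize(self, value):
        return self.null if value is None else value

    def from_sql(self, sql_value):
        if isinstance(sql_value, bytes):
            sql_value = sql_value.decode('utf8', 'ignore')
        if isinstance(sql_value, str):
            return self.parse(sql_value)
        return self.normalize(sql_value)

# MiniInteger().from_sql(u'7') == 7; MiniInteger().from_sql(None) == 0
# ---------------------------------------------------------------------------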
diff --git a/beets/importer.py b/beets/importer.py
index f776b1a01..e9c92cf8f 100644
--- a/beets/importer.py
+++ b/beets/importer.py
@@ -81,7 +81,7 @@ def _save_state(state):
with open(config['statefile'].as_filename(), 'w') as f:
pickle.dump(state, f)
except IOError as exc:
- log.error(u'state file could not be written: %s' % unicode(exc))
+ log.error(u'state file could not be written: {0}'.format(exc))
# Utilities for reading and writing the beets progress file, which
@@ -278,10 +278,8 @@ class ImportSession(object):
# Split directory tasks into one task for each album
stages += [group_albums(self)]
if self.config['autotag']:
- # Only look up and query the user when autotagging.
-
# FIXME We should also resolve duplicates when not
- # autotagging.
+ # autotagging. This is currently handled in `user_query`
stages += [lookup_candidates(self), user_query(self)]
else:
stages += [import_asis(self)]
@@ -339,7 +337,8 @@ class ImportSession(object):
# Either accept immediately or prompt for input to decide.
if self.want_resume is True or \
self.should_resume(toppath):
- log.warn('Resuming interrupted import of %s' % toppath)
+ log.warn(u'Resuming interrupted import of {0}'.format(
+ util.displayable_path(toppath)))
self._is_resuming[toppath] = True
else:
# Clear progress; we're starting from the top.
@@ -351,24 +350,45 @@ class ImportSession(object):
class ImportTask(object):
"""Represents a single set of items to be imported along with its
intermediate state. May represent an album or a single item.
+
+ The import session and stages call the following methods in the
+ given order.
+
+    * `lookup_candidates()` Sets the `cur_artist`, `cur_album`,
+ `candidates`, and `rec` attributes. `candidates` is a list of
+ `AlbumMatch` objects.
+
+ * `choose_match()` Uses the session to set the `match` attribute
+ from the `candidates` list.
+
+ * `find_duplicates()` Returns a list of albums from `lib` with the
+ same artist and album name as the task.
+
+ * `apply_metadata()` Sets the attributes of the items from the
+ task's `match` attribute.
+
+ * `add()` Add the imported items and album to the database.
+
+ * `manipulate_files()` Copy, move, and write files depending on the
+ session configuration.
+
+    * `finalize()` Update the import progress and clean up the file
+ system.
"""
def __init__(self, toppath=None, paths=None, items=None):
self.toppath = toppath
self.paths = paths
self.items = items
self.choice_flag = None
+
+ self.cur_album = None
+ self.cur_artist = None
+ self.candidates = []
+ self.rec = None
# TODO remove this eventually
self.should_remove_duplicates = False
self.is_album = True
- def set_null_candidates(self):
- """Set the candidates to indicate no album match was found.
- """
- self.cur_artist = None
- self.cur_album = None
- self.candidates = None
- self.rec = None
-
def set_choice(self, choice):
"""Given an AlbumMatch or TrackMatch object or an action constant,
indicates that an action has been selected for this task.
@@ -422,12 +442,15 @@ class ImportTask(object):
def imported_items(self):
"""Return a list of Items that should be added to the library.
- If this is an album task, return the list of items in the
- selected match or everything if the choice is ASIS. If this is a
- singleton task, return a list containing the item.
+
+        If the task applies an album match, the method only returns the
+ matched items.
"""
if self.choice_flag == action.ASIS:
return list(self.items)
+ # FIXME this should be a simple attribute. There should be no
+ # need to retrieve the keys of `match.mapping`. This requires
+ # that we remove unmatched items from the list.
elif self.choice_flag == action.APPLY:
return self.match.mapping.keys()
else:
@@ -436,6 +459,8 @@ class ImportTask(object):
def apply_metadata(self):
"""Copy metadata from match info to the items.
"""
+ # TODO call should be more descriptive like
+ # apply_metadata(self.match, self.items)
autotag.apply_metadata(self.match.info, self.match.mapping)
def duplicate_items(self, lib):
@@ -446,13 +471,13 @@ class ImportTask(object):
def remove_duplicates(self, lib):
duplicate_items = self.duplicate_items(lib)
- log.debug('removing %i old duplicated items' %
- len(duplicate_items))
+ log.debug(u'removing {0} old duplicated items'
+ .format(len(duplicate_items)))
for item in duplicate_items:
item.remove()
if lib.directory in util.ancestry(item.path):
- log.debug(u'deleting duplicate %s' %
- util.displayable_path(item.path))
+ log.debug(u'deleting duplicate {0}'
+ .format(util.displayable_path(item.path)))
util.remove(item.path)
util.prune_dirs(os.path.dirname(item.path),
lib.directory)
@@ -542,7 +567,7 @@ class ImportTask(object):
duplicates.append(album)
return duplicates
- def infer_album_fields(self):
+ def align_album_level_fields(self):
"""Make the some album fields equal across `self.items`
"""
changes = {}
@@ -617,28 +642,91 @@ class ImportTask(object):
def add(self, lib):
"""Add the items as an album to the library and remove replaced items.
"""
+ self.align_album_level_fields()
with lib.transaction():
+ self.record_replaced(lib)
self.remove_replaced(lib)
self.album = lib.add_album(self.imported_items())
+ self.reimport_metadata(lib)
- def remove_replaced(self, lib):
- """Removes all the items from the library that have the same
- path as an item from this task.
-
- Records the replaced items in the `replaced_items` dictionary
+ def record_replaced(self, lib):
+ """Records the replaced items and albums in the `replaced_items`
+ and `replaced_albums` dictionaries.
"""
self.replaced_items = defaultdict(list)
+ self.replaced_albums = defaultdict(list)
+ replaced_album_ids = set()
for item in self.imported_items():
dup_items = list(lib.items(
dbcore.query.BytesQuery('path', item.path)
))
self.replaced_items[item] = dup_items
for dup_item in dup_items:
- log.debug('replacing item %i: %s' %
- (dup_item.id, displayable_path(item.path)))
+ if (not dup_item.album_id or
+ dup_item.album_id in replaced_album_ids):
+ continue
+ replaced_album = dup_item.get_album()
+ if replaced_album:
+ replaced_album_ids.add(dup_item.album_id)
+ self.replaced_albums[replaced_album.path] = replaced_album
+
+ def reimport_metadata(self, lib):
+ """For reimports, preserves metadata for reimported items and
+ albums.
+ """
+ if self.is_album:
+ replaced_album = self.replaced_albums.get(self.album.path)
+ if replaced_album:
+ self.album.added = replaced_album.added
+ self.album.update(replaced_album._values_flex)
+ self.album.store()
+ log.debug(
+ u'Reimported album: added {0}, flexible '
+ u'attributes {1} from album {2} for {3}'.format(
+ self.album.added,
+ replaced_album._values_flex.keys(),
+ replaced_album.id,
+ displayable_path(self.album.path),
+ )
+ )
+
+ for item in self.imported_items():
+ dup_items = self.replaced_items[item]
+ for dup_item in dup_items:
+ if dup_item.added and dup_item.added != item.added:
+ item.added = dup_item.added
+ log.debug(
+ u'Reimported item added {0} '
+ u'from item {1} for {2}'.format(
+ item.added,
+ dup_item.id,
+ displayable_path(item.path),
+ )
+ )
+ item.update(dup_item._values_flex)
+ log.debug(
+ u'Reimported item flexible attributes {0} '
+ u'from item {1} for {2}'.format(
+ dup_item._values_flex.keys(),
+ dup_item.id,
+ displayable_path(item.path),
+ )
+ )
+ item.store()
+
+ def remove_replaced(self, lib):
+ """Removes all the items from the library that have the same
+ path as an item from this task.
+ """
+ for item in self.imported_items():
+ for dup_item in self.replaced_items[item]:
+ log.debug(u'Replacing item {0}: {1}'
+ .format(dup_item.id,
+ displayable_path(item.path)))
dup_item.remove()
- log.debug('%i of %i items replaced' % (len(self.replaced_items),
- len(self.imported_items())))
+ log.debug(u'{0} of {1} items replaced'
+ .format(sum(bool(l) for l in self.replaced_items.values()),
+ len(self.imported_items())))
def choose_match(self, session):
"""Ask the session which match should apply and apply it.
@@ -726,8 +814,10 @@ class SingletonImportTask(ImportTask):
def add(self, lib):
with lib.transaction():
+ self.record_replaced(lib)
self.remove_replaced(lib)
lib.add(self.item)
+ self.reimport_metadata(lib)
def infer_album_fields(self):
raise NotImplementedError
@@ -958,17 +1048,17 @@ def read_tasks(session):
archive_task = None
if ArchiveImportTask.is_archive(syspath(toppath)):
if not (session.config['move'] or session.config['copy']):
- log.warn("Archive importing requires either "
+ log.warn(u"Archive importing requires either "
"'copy' or 'move' to be enabled.")
continue
- log.debug('extracting archive {0}'
+ log.debug(u'extracting archive {0}'
.format(displayable_path(toppath)))
archive_task = ArchiveImportTask(toppath)
try:
archive_task.extract()
except Exception as exc:
- log.error('extraction failed: {0}'.format(exc))
+ log.error(u'extraction failed: {0}'.format(exc))
continue
# Continue reading albums from the extracted directory.
@@ -1036,8 +1126,8 @@ def query_tasks(session):
else:
# Search for albums.
for album in session.lib.albums(session.query):
- log.debug('yielding album %i: %s - %s' %
- (album.id, album.albumartist, album.album))
+ log.debug(u'yielding album {0}: {1} - {2}'
+ .format(album.id, album.albumartist, album.album))
items = list(album.items())
# Clear IDs from re-tagged items so they appear "fresh" when
@@ -1062,7 +1152,7 @@ def lookup_candidates(session, task):
return
plugins.send('import_task_start', session=session, task=task)
- log.debug('Looking up: %s' % displayable_path(task.paths))
+ log.debug(u'Looking up: {0}'.format(displayable_path(task.paths)))
task.lookup_candidates()
@@ -1140,9 +1230,6 @@ def import_asis(session, task):
return
log.info(displayable_path(task.paths))
-
- # Behave as if ASIS were selected.
- task.set_null_candidates()
task.set_choice(action.ASIS)
@@ -1159,10 +1246,6 @@ def apply_choices(session, task):
task.apply_metadata()
plugins.send('import_task_apply', session=session, task=task)
- # Infer album-level fields.
- if task.is_album:
- task.infer_album_fields()
-
task.add(session.lib)
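# --- Illustrative sketch (not part of the patch) ---------------------------
# A standalone rendering of what reimport_metadata() above preserves on a
# reimport, using plain dicts in place of library objects: the replaced
# item's `added` timestamp and its flexible attributes carry over to the
# freshly imported item. The values here are made up.
old_item = {'added': 1388534400.0, 'flex': {'play_count': 12}}
new_item = {'added': 1420070400.0, 'flex': {}}

if old_item['added'] and old_item['added'] != new_item['added']:
    new_item['added'] = old_item['added']   # keep the original import date
new_item['flex'].update(old_item['flex'])   # keep flexible attributes
# ---------------------------------------------------------------------------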
diff --git a/beets/library.py b/beets/library.py
index dd0da3b30..e90e575c0 100644
--- a/beets/library.py
+++ b/beets/library.py
@@ -60,13 +60,10 @@ class PathQuery(dbcore.FieldQuery):
# Library-specific field types.
-
-class DateType(types.Type):
+class DateType(types.Float):
# TODO representation should be `datetime` object
# TODO distinguish beetween date and time types
- sql = u'REAL'
query = dbcore.query.DateQuery
- null = 0.0
def format(self, value):
return time.strftime(beets.config['time_format'].get(unicode),
@@ -89,6 +86,7 @@ class DateType(types.Type):
class PathType(types.Type):
sql = u'BLOB'
query = PathQuery
+ model_type = bytes
def format(self, value):
return util.displayable_path(value)
@@ -109,6 +107,14 @@ class PathType(types.Type):
else:
return value
+ def from_sql(self, sql_value):
+ return self.normalize(sql_value)
+
+ def to_sql(self, value):
+ if isinstance(value, str):
+ value = buffer(value)
+ return value
+
class MusicalKey(types.String):
"""String representing the musical key of a song.
@@ -137,6 +143,34 @@ class MusicalKey(types.String):
return self.parse(key)
+# Library-specific sort types.
+
+class SmartArtistSort(dbcore.query.Sort):
+ """Sort by artist (either album artist or track artist),
+ prioritizing the sort field over the raw field.
+ """
+ def __init__(self, model_cls, ascending=True):
+ self.album = model_cls is Album
+ self.ascending = ascending
+
+ def order_clause(self):
+ order = "ASC" if self.ascending else "DESC"
+ if self.album:
+ field = 'albumartist'
+ else:
+ field = 'artist'
+ return ('(CASE {0}_sort WHEN NULL THEN {0} '
+ 'WHEN "" THEN {0} '
+ 'ELSE {0}_sort END) {1}').format(field, order)
+
+ def sort(self, objs):
+ if self.album:
+ key = lambda a: a.albumartist_sort or a.albumartist
+ else:
+ key = lambda i: i.artist_sort or i.artist
+ return sorted(objs, key=key, reverse=not self.ascending)
+
+
# Special path format key.
PF_KEY_DEFAULT = 'default'
@@ -188,7 +222,6 @@ class WriteError(FileOperationError):
class LibModel(dbcore.Model):
"""Shared concrete functionality for Items and Albums.
"""
- _bytes_keys = ('path', 'artpath')
def _template_funcs(self):
funcs = DefaultTemplateFunctions(self, self._db).functions()
@@ -341,6 +374,8 @@ class Item(LibModel):
_formatter = FormattedItemMapping
+ _sorts = {'artist': SmartArtistSort}
+
@classmethod
def _getters(cls):
getters = plugins.item_field_getters()
@@ -438,7 +473,8 @@ class Item(LibModel):
else:
path = normpath(path)
- plugins.send('write', item=self, path=path)
+ tags = dict(self)
+ plugins.send('write', item=self, path=path, tags=tags)
try:
mediafile = MediaFile(syspath(path),
@@ -446,7 +482,7 @@ class Item(LibModel):
except (OSError, IOError, UnreadableFileError) as exc:
raise ReadError(self.path, exc)
- mediafile.update(self)
+ mediafile.update(tags)
try:
mediafile.save()
except (OSError, IOError, MutagenError) as exc:
@@ -470,6 +506,22 @@ class Item(LibModel):
log.error(exc)
return False
+ def try_sync(self, write=None):
+ """Synchronize the item with the database and the media file
+ tags, updating them with this object's current state.
+
+ By default, the current `path` for the item is used to write
+ tags. If `write` is `False`, no tags are written. If `write` is
+ a path, tags are written to that file instead.
+
+ Similar to calling :meth:`write` and :meth:`store`.
+ """
+ if write is True:
+ write = None
+ if write is not False:
+ self.try_write(path=write)
+ self.store()
+
# Files themselves.
def move_file(self, dest, copy=False, link=False):
@@ -591,7 +643,7 @@ class Item(LibModel):
for query, path_format in path_formats:
if query == PF_KEY_DEFAULT:
continue
- (query, _) = get_query_sort(query, type(self))
+ query, _ = parse_query_string(query, type(self))
if query.match(self):
# The query matches the item! Use the corresponding path
# format.
@@ -692,6 +744,11 @@ class Album(LibModel):
_search_fields = ('album', 'albumartist', 'genre')
+ _sorts = {
+ 'albumartist': SmartArtistSort,
+ 'artist': SmartArtistSort,
+ }
+
item_keys = [
'added',
'albumartist',
@@ -774,7 +831,9 @@ class Album(LibModel):
return
new_art = util.unique_path(new_art)
- log.debug('moving album art %s to %s' % (old_art, new_art))
+ log.debug(u'moving album art {0} to {1}'
+ .format(util.displayable_path(old_art),
+ util.displayable_path(new_art)))
if copy:
util.copy(old_art, new_art)
elif link:
@@ -888,71 +947,68 @@ class Album(LibModel):
item[key] = value
item.store()
+ def try_sync(self, write=True):
+ """Synchronize the album and its items with the database and
+ their files by updating them with this object's current state.
-# Query construction and parsing helpers.
+ `write` indicates whether to write tags to the item files.
+ """
+ self.store()
+ for item in self.items():
+ item.try_sync(bool(write))
-def get_query_sort(val, model_cls):
- """Take a value which may be None, a query string, a query string
- list, or a Query object, and return a suitable Query object and Sort
- object.
- `model_cls` is the subclass of Model indicating which entity this
- is a query for (i.e., Album or Item) and is used to determine which
- fields are searched.
+# Query construction helpers.
+
+def parse_query_parts(parts, model_cls):
+ """Given a beets query string as a list of components, return the
+ `Query` and `Sort` they represent.
+
+ Like `dbcore.parse_sorted_query`, with beets query prefixes and
+ special path query detection.
"""
# Get query types and their prefix characters.
prefixes = {':': dbcore.query.RegexpQuery}
prefixes.update(plugins.queries())
- # Convert a single string into a list of space-separated
- # criteria.
- if isinstance(val, basestring):
- # A bug in Python < 2.7.3 prevents correct shlex splitting of
- # Unicode strings.
- # http://bugs.python.org/issue6988
- if isinstance(val, unicode):
- val = val.encode('utf8')
- val = [s.decode('utf8') for s in shlex.split(val)]
-
- if val is None:
- return (dbcore.query.TrueQuery(), None)
-
- elif isinstance(val, list) or isinstance(val, tuple):
- # Special-case path-like queries, which are non-field queries
- # containing path separators (/).
- if 'path' in model_cls._fields:
- path_parts = []
- non_path_parts = []
- for s in val:
- if s.find(os.sep, 0, s.find(':')) != -1:
- # Separator precedes colon.
- path_parts.append(s)
- else:
- non_path_parts.append(s)
- else:
- path_parts = ()
- non_path_parts = val
-
- # separate query token and sort token
- query_val = [s for s in non_path_parts if not s.endswith(('+', '-'))]
- sort_val = [s for s in non_path_parts if s.endswith(('+', '-'))]
-
- # Parse remaining parts and construct an AndQuery.
- query = dbcore.query_from_strings(
- dbcore.AndQuery, model_cls, prefixes, query_val
- )
- sort = dbcore.sort_from_strings(model_cls, sort_val)
-
- # Add path queries to aggregate query.
- if path_parts:
- query.subqueries += [PathQuery('path', s) for s in path_parts]
- return query, sort
-
- elif isinstance(val, dbcore.Query):
- return val, None
-
+ # Special-case path-like queries, which are non-field queries
+ # containing path separators (/).
+ if 'path' in model_cls._fields:
+ path_parts = []
+ non_path_parts = []
+ for s in parts:
+ if s.find(os.sep, 0, s.find(':')) != -1:
+ # Separator precedes colon.
+ path_parts.append(s)
+ else:
+ non_path_parts.append(s)
else:
- raise ValueError('query must be None or have type Query or str')
+ path_parts = ()
+ non_path_parts = parts
+
+ query, sort = dbcore.parse_sorted_query(
+ model_cls, non_path_parts, prefixes
+ )
+
+ # Add path queries to aggregate query.
+ if path_parts:
+ query.subqueries += [PathQuery('path', s) for s in path_parts]
+ return query, sort
+
+
+def parse_query_string(s, model_cls):
+ """Given a beets query string, return the `Query` and `Sort` they
+ represent.
+
+ The string is split into components using shell-like syntax.
+ """
+ # A bug in Python < 2.7.3 prevents correct shlex splitting of
+ # Unicode strings.
+ # http://bugs.python.org/issue6988
+ if isinstance(s, unicode):
+ s = s.encode('utf8')
+ parts = [p.decode('utf8') for p in shlex.split(s)]
+ return parse_query_parts(parts, model_cls)
# The Library: interface to the database.
@@ -1016,30 +1072,41 @@ class Library(dbcore.Database):
# Querying.
- def _fetch(self, model_cls, query, sort_order=None):
- """Parse a query and fetch. If a order specification is present in the
- query string the sort_order argument is ignored.
- """
- query, sort = get_query_sort(query, model_cls)
- sort = sort or sort_order
+ def _fetch(self, model_cls, query, sort=None):
+ """Parse a query and fetch. If a order specification is present
+ in the query string the `sort` argument is ignored.
+ """
+ # Parse the query, if necessary.
+ parsed_sort = None
+ if isinstance(query, basestring):
+ query, parsed_sort = parse_query_string(query, model_cls)
+ elif isinstance(query, (list, tuple)):
+ query, parsed_sort = parse_query_parts(query, model_cls)
+
+ # Any non-null sort specified by the parsed query overrides the
+ # provided sort.
+ if parsed_sort and not isinstance(parsed_sort, dbcore.query.NullSort):
+ sort = parsed_sort
return super(Library, self)._fetch(
model_cls, query, sort
)
- def albums(self, query=None, sort_order=None):
- """Get a sorted list of :class:`Album` objects matching the
- given sort order. If a order specification is present in the query
- string the sort_order argument is ignored.
+ def albums(self, query=None, sort=None):
+ """Get :class:`Album` objects matching the query.
"""
- return self._fetch(Album, query, sort_order)
+ sort = sort or dbcore.sort_from_strings(
+ Album, beets.config['sort_album'].as_str_seq()
+ )
+ return self._fetch(Album, query, sort)
- def items(self, query=None, sort_order=None):
- """Get a sorted list of :class:`Item` objects matching the given
- given sort order. If a order specification is present in the query
- string the sort_order argument is ignored.
+ def items(self, query=None, sort=None):
+ """Get :class:`Item` objects matching the query.
"""
- return self._fetch(Item, query, sort_order)
+ sort = sort or dbcore.sort_from_strings(
+ Item, beets.config['sort_item'].as_str_seq()
+ )
+ return self._fetch(Item, query, sort)
# Convenience accessors.
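# --- Illustrative sketch (not part of the patch) ---------------------------
# A standalone rendering of the path-query detection used by
# parse_query_parts() above: a term counts as a path query when a path
# separator appears before the first colon (the same
# s.find(os.sep, 0, s.find(':')) test as in the function).
import os

def is_path_part(term):
    return term.find(os.sep, 0, term.find(':')) != -1

# On POSIX:
#   is_path_part(u'/music/The Beatles')  -> True   (treated as PathQuery)
#   is_path_part(u'artist:Beatles')      -> False  (ordinary field query)
# ---------------------------------------------------------------------------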
diff --git a/beets/mediafile.py b/beets/mediafile.py
index 6c08dac3f..95bbd309d 100644
--- a/beets/mediafile.py
+++ b/beets/mediafile.py
@@ -1263,7 +1263,7 @@ class MediaFile(object):
except Exception as exc:
# Isolate bugs in Mutagen.
log.debug(traceback.format_exc())
- log.error('uncaught Mutagen exception in open: {0}'.format(exc))
+ log.error(u'uncaught Mutagen exception in open: {0}'.format(exc))
raise MutagenError(path, exc)
if self.mgfile is None:
@@ -1330,7 +1330,7 @@ class MediaFile(object):
raise
except Exception as exc:
log.debug(traceback.format_exc())
- log.error('uncaught Mutagen exception in save: {0}'.format(exc))
+ log.error(u'uncaught Mutagen exception in save: {0}'.format(exc))
raise MutagenError(self.path, exc)
def delete(self):
diff --git a/beets/plugins.py b/beets/plugins.py
index 2ee5f88f2..eac64e640 100755
--- a/beets/plugins.py
+++ b/beets/plugins.py
@@ -202,7 +202,7 @@ def load_plugins(names=()):
except ImportError as exc:
# Again, this is hacky:
if exc.args[0].endswith(' ' + name):
- log.warn('** plugin %s not found' % name)
+ log.warn(u'** plugin {0} not found'.format(name))
else:
raise
else:
@@ -212,7 +212,7 @@ def load_plugins(names=()):
_classes.add(obj)
except:
- log.warn('** error loading plugin %s' % name)
+ log.warn(u'** error loading plugin {0}'.format(name))
log.warn(traceback.format_exc())
@@ -395,7 +395,7 @@ def send(event, **arguments):
Returns a list of return values from the handlers.
"""
- log.debug('Sending event: %s' % event)
+ log.debug(u'Sending event: {0}'.format(event))
for handler in event_handlers()[event]:
# Don't break legacy plugins if we want to pass more arguments
argspec = inspect.getargspec(handler).args
diff --git a/beets/ui/__init__.py b/beets/ui/__init__.py
index c3bb7a158..3d469d395 100644
--- a/beets/ui/__init__.py
+++ b/beets/ui/__init__.py
@@ -441,30 +441,6 @@ def colordiff(a, b, highlight='red'):
return unicode(a), unicode(b)
-def color_diff_suffix(a, b, highlight='red'):
- """Colorize the differing suffix between two strings."""
- a, b = unicode(a), unicode(b)
- if not config['color']:
- return a, b
-
- # Fast path.
- if a == b:
- return a, b
-
- # Find the longest common prefix.
- first_diff = None
- for i in range(min(len(a), len(b))):
- if a[i] != b[i]:
- first_diff = i
- break
- else:
- first_diff = min(len(a), len(b))
-
- # Colorize from the first difference on.
- return (a[:first_diff] + colorize(highlight, a[first_diff:]),
- b[:first_diff] + colorize(highlight, b[first_diff:]))
-
-
def get_path_formats(subview=None):
"""Get the configuration's path formats as a list of query/template
pairs.
@@ -895,10 +871,10 @@ def _configure(options):
config_path = config.user_config_path()
if os.path.isfile(config_path):
- log.debug('user configuration: {0}'.format(
+ log.debug(u'user configuration: {0}'.format(
util.displayable_path(config_path)))
else:
- log.debug('no user configuration found at {0}'.format(
+ log.debug(u'no user configuration found at {0}'.format(
util.displayable_path(config_path)))
log.debug(u'data directory: {0}'
@@ -923,10 +899,8 @@ def _open_library(config):
))
log.debug(u'library database: {0}\n'
u'library directory: {1}'
- .format(
- util.displayable_path(lib.path),
- util.displayable_path(lib.directory),
- ))
+ .format(util.displayable_path(lib.path),
+ util.displayable_path(lib.directory)))
return lib
diff --git a/beets/ui/commands.py b/beets/ui/commands.py
index adf0070a0..947fc22ba 100644
--- a/beets/ui/commands.py
+++ b/beets/ui/commands.py
@@ -20,7 +20,6 @@ from __future__ import print_function
import logging
import os
import time
-import itertools
import codecs
import platform
import re
@@ -39,7 +38,6 @@ from beets.util.functemplate import Template
from beets import library
from beets import config
from beets.util.confit import _package_path
-from beets.dbcore import sort_from_strings
VARIOUS_ARTISTS = u'Various Artists'
@@ -320,17 +318,9 @@ def show_change(cur_artist, cur_album, match):
color = 'lightgray'
else:
color = 'red'
- if (cur_track + new_track).count('-') == 1:
- lhs_track, rhs_track = (ui.colorize(color, cur_track),
- ui.colorize(color, new_track))
- else:
- color = 'red'
- lhs_track, rhs_track = ui.color_diff_suffix(cur_track,
- new_track)
- templ = (ui.colorize(color, u' (#') + u'{0}' +
- ui.colorize(color, u')'))
- lhs += templ.format(lhs_track)
- rhs += templ.format(rhs_track)
+ templ = ui.colorize(color, u' (#{0})')
+ lhs += templ.format(cur_track)
+ rhs += templ.format(new_track)
lhs_width += len(cur_track) + 4
# Length change.
@@ -339,12 +329,9 @@ def show_change(cur_artist, cur_album, match):
config['ui']['length_diff_thresh'].as_number():
cur_length = ui.human_seconds_short(item.length)
new_length = ui.human_seconds_short(track_info.length)
- lhs_length, rhs_length = ui.color_diff_suffix(cur_length,
- new_length)
- templ = (ui.colorize('red', u' (') + u'{0}' +
- ui.colorize('red', u')'))
- lhs += templ.format(lhs_length)
- rhs += templ.format(rhs_length)
+ templ = ui.colorize('red', u' ({0})')
+ lhs += templ.format(cur_length)
+ rhs += templ.format(new_length)
lhs_width += len(cur_length) + 3
# Penalties.
@@ -777,12 +764,12 @@ class TerminalImportSession(importer.ImportSession):
"""Decide what to do when a new album or item seems similar to one
that's already in the library.
"""
- log.warn("This %s is already in the library!" %
- ("album" if task.is_album else "item"))
+ log.warn(u"This {0} is already in the library!"
+ .format("album" if task.is_album else "item"))
if config['import']['quiet']:
# In quiet mode, don't prompt -- just skip.
- log.info('Skipping.')
+ log.info(u'Skipping.')
sel = 's'
else:
# Print some detail about the existing and new items so the
@@ -967,18 +954,11 @@ def list_items(lib, query, album, fmt):
albums instead of single items.
"""
tmpl = Template(ui._pick_format(album, fmt))
-
if album:
- sort_parts = str(config['sort_album']).split()
- sort_order = sort_from_strings(library.Album,
- sort_parts)
- for album in lib.albums(query, sort_order):
+ for album in lib.albums(query):
ui.print_obj(album, lib, tmpl)
else:
- sort_parts = str(config['sort_item']).split()
- sort_order = sort_from_strings(library.Item,
- sort_parts)
- for item in lib.items(query, sort_order):
+ for item in lib.items(query):
ui.print_obj(item, lib, tmpl)
@@ -1030,8 +1010,8 @@ def update_items(lib, query, album, move, pretend):
# Did the item change since last checked?
if item.current_mtime() <= item.mtime:
- log.debug(u'skipping %s because mtime is up to date (%i)' %
- (displayable_path(item.path), item.mtime))
+ log.debug(u'skipping {0} because mtime is up to date ({1})'
+ .format(displayable_path(item.path), item.mtime))
continue
# Read new data.
@@ -1081,7 +1061,7 @@ def update_items(lib, query, album, move, pretend):
continue
album = lib.get_album(album_id)
if not album: # Empty albums have already been removed.
- log.debug('emptied album %i' % album_id)
+ log.debug(u'emptied album {0}'.format(album_id))
continue
first_item = album.items().get()
@@ -1092,7 +1072,7 @@ def update_items(lib, query, album, move, pretend):
# Move album art (and any inconsistent items).
if move and lib.directory in ancestry(first_item.path):
- log.debug('moving album %i' % album_id)
+ log.debug(u'moving album {0}'.format(album_id))
album.move()
@@ -1298,25 +1278,17 @@ def modify_items(lib, mods, dels, query, write, move, album, confirm):
if not ui.input_yn('Really modify%s (Y/n)?' % extra):
return
- # Apply changes to database.
+ # Apply changes to database and files
with lib.transaction():
for obj in changed:
if move:
cur_path = obj.path
if lib.directory in ancestry(cur_path): # In library?
- log.debug('moving object %s' % cur_path)
+ log.debug(u'moving object {0}'
+ .format(displayable_path(cur_path)))
obj.move()
- obj.store()
-
- # Apply tags if requested.
- if write:
- if album:
- changed_items = itertools.chain(*(a.items() for a in changed))
- else:
- changed_items = changed
- for item in changed_items:
- item.try_write()
+ obj.try_sync(write)
def modify_parse_args(args):
@@ -1391,9 +1363,9 @@ def move_items(lib, dest, query, copy, album):
action = 'Copying' if copy else 'Moving'
entity = 'album' if album else 'item'
- log.info('%s %i %ss.' % (action, len(objs), entity))
+ log.info(u'{0} {1} {2}s.'.format(action, len(objs), entity))
for obj in objs:
- log.debug('moving: %s' % obj.path)
+ log.debug(u'moving: {0}'.format(util.displayable_path(obj.path)))
obj.move(copy, basedir=dest)
obj.store()
@@ -1457,7 +1429,7 @@ def write_items(lib, query, pretend, force):
changed = ui.show_model_changes(item, clean_item,
library.Item._media_fields, force)
if (changed or force) and not pretend:
- item.try_write()
+ item.try_sync()
def write_func(lib, opts, args):
diff --git a/beets/util/artresizer.py b/beets/util/artresizer.py
index 21a2135b4..7cd12943c 100644
--- a/beets/util/artresizer.py
+++ b/beets/util/artresizer.py
@@ -1,5 +1,5 @@
# This file is part of beets.
-# Copyright 2013, Fabrice Laporte
+# Copyright 2014, Fabrice Laporte
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
@@ -18,6 +18,7 @@ public resizing proxy if neither is available.
import urllib
import subprocess
import os
+import re
from tempfile import NamedTemporaryFile
import logging
from beets import util
@@ -76,7 +77,7 @@ def pil_resize(maxwidth, path_in, path_out=None):
def im_resize(maxwidth, path_in, path_out=None):
"""Resize using ImageMagick's ``convert`` tool.
- tool. Return the output path of resized image.
+    Return the output path of the resized image.
"""
path_out = path_out or temp_file_for(path_in)
log.debug(u'artresizer: ImageMagick resizing {0} to {1}'.format(
@@ -132,8 +133,9 @@ class ArtResizer(object):
"""Create a resizer object for the given method or, if none is
specified, with an inferred method.
"""
- self.method = method or self._guess_method()
+ self.method = self._check_method(method)
log.debug(u"artresizer: method is {0}".format(self.method))
+ self.can_compare = self._can_compare()
def resize(self, maxwidth, path_in, path_out=None):
"""Manipulate an image file according to the method, returning a
@@ -159,30 +161,51 @@ class ArtResizer(object):
@property
def local(self):
"""A boolean indicating whether the resizing method is performed
- locally (i.e., PIL or IMAGEMAGICK).
+ locally (i.e., PIL or ImageMagick).
"""
- return self.method in BACKEND_FUNCS
+ return self.method[0] in BACKEND_FUNCS
+
+ def _can_compare(self):
+ """A boolean indicating whether image comparison is available"""
+
+ return self.method[0] == IMAGEMAGICK and self.method[1] > (6, 8, 7)
@staticmethod
- def _guess_method():
- """Determine which resizing method to use. Returns PIL,
- IMAGEMAGICK, or WEBPROXY depending on available dependencies.
+ def _check_method(method=None):
+ """A tuple indicating whether current method is available and its
+ version. If no method is given, it returns a supported one.
"""
- # Try importing PIL.
- try:
- __import__('PIL', fromlist=['Image'])
- return PIL
- except ImportError:
- pass
+ # Guess available method
+ if not method:
+ for m in [IMAGEMAGICK, PIL]:
+ _, version = ArtResizer._check_method(m)
+ if version:
+ return (m, version)
+ return (WEBPROXY, (0))
- # Try invoking ImageMagick's "convert".
- try:
- out = util.command_output(['convert', '--version'])
- if 'imagemagick' in out.lower():
- # system32/convert.exe may be interfering
- return IMAGEMAGICK
- except (subprocess.CalledProcessError, OSError):
- pass
+ if method == IMAGEMAGICK:
- # Fall back to Web proxy method.
- return WEBPROXY
+ # Try invoking ImageMagick's "convert".
+ try:
+ out = util.command_output(['identify', '--version'])
+
+ if 'imagemagick' in out.lower():
+ pattern = r".+ (\d+)\.(\d+)\.(\d+).*"
+ match = re.search(pattern, out)
+ if match:
+ return (IMAGEMAGICK,
+ (int(match.group(1)),
+ int(match.group(2)),
+ int(match.group(3))))
+ return (IMAGEMAGICK, (0))
+
+ except (subprocess.CalledProcessError, OSError):
+ return (IMAGEMAGICK, None)
+
+ if method == PIL:
+ # Try importing PIL.
+ try:
+ __import__('PIL', fromlist=['Image'])
+ return (PIL, (0))
+ except ImportError:
+ return (PIL, None)
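# --- Illustrative sketch (not part of the patch) ---------------------------
# A standalone rendering of the version handling behind _check_method()
# and _can_compare() above: the regex pulls a (major, minor, patch) tuple
# out of ImageMagick's version banner, and image comparison is enabled
# when that tuple exceeds (6, 8, 7). The sample banner string is made up.
import re

def parse_im_version(output):
    match = re.search(r".+ (\d+)\.(\d+)\.(\d+).*", output)
    if match:
        return tuple(int(match.group(i)) for i in (1, 2, 3))
    return (0,)

version = parse_im_version(u'Version: ImageMagick 6.9.1-10 Q16 x86_64')
can_compare = version > (6, 8, 7)   # True: tuples compare element-wise
# ---------------------------------------------------------------------------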
diff --git a/beetsplug/beatport.py b/beetsplug/beatport.py
index 86193a971..b83aef2f7 100644
--- a/beetsplug/beatport.py
+++ b/beetsplug/beatport.py
@@ -194,7 +194,7 @@ class BeatportPlugin(BeetsPlugin):
try:
return self._get_releases(query)
except BeatportAPIError as e:
- log.debug('Beatport API Error: %s (query: %s)' % (e, query))
+ log.debug(u'Beatport API Error: {0} (query: {1})'.format(e, query))
return []
def item_candidates(self, item, artist, title):
@@ -205,14 +205,14 @@ class BeatportPlugin(BeetsPlugin):
try:
return self._get_tracks(query)
except BeatportAPIError as e:
- log.debug('Beatport API Error: %s (query: %s)' % (e, query))
+ log.debug(u'Beatport API Error: {0} (query: {1})'.format(e, query))
return []
def album_for_id(self, release_id):
"""Fetches a release by its Beatport ID and returns an AlbumInfo object
or None if the release is not found.
"""
- log.debug('Searching Beatport for release %s' % str(release_id))
+ log.debug(u'Searching Beatport for release {0}'.format(release_id))
match = re.search(r'(^|beatport\.com/release/.+/)(\d+)$', release_id)
if not match:
return None
@@ -224,7 +224,7 @@ class BeatportPlugin(BeetsPlugin):
"""Fetches a track by its Beatport ID and returns a TrackInfo object
or None if the track is not found.
"""
- log.debug('Searching Beatport for track %s' % str(track_id))
+ log.debug(u'Searching Beatport for track {0}'.format(str(track_id)))
match = re.search(r'(^|beatport\.com/track/.+/)(\d+)$', track_id)
if not match:
return None
diff --git a/beetsplug/bpd/__init__.py b/beetsplug/bpd/__init__.py
index 8b565d9dc..7b550487c 100644
--- a/beetsplug/bpd/__init__.py
+++ b/beetsplug/bpd/__init__.py
@@ -1149,20 +1149,23 @@ class BPDPlugin(BeetsPlugin):
'host': u'',
'port': 6600,
'password': u'',
+ 'volume': VOLUME_MAX,
})
- def start_bpd(self, lib, host, port, password, debug):
+ def start_bpd(self, lib, host, port, password, volume, debug):
"""Starts a BPD server."""
if debug:
log.setLevel(logging.DEBUG)
else:
log.setLevel(logging.WARNING)
try:
- Server(lib, host, port, password).run()
+ server = Server(lib, host, port, password)
+ server.cmd_setvol(None, volume)
+ server.run()
except NoGstreamerError:
- global_log.error('Gstreamer Python bindings not found.')
- global_log.error('Install "python-gst0.10", "py27-gst-python", '
- 'or similar package to use BPD.')
+ global_log.error(u'Gstreamer Python bindings not found.')
+ global_log.error(u'Install "python-gst0.10", "py27-gst-python", '
+ u'or similar package to use BPD.')
def commands(self):
cmd = beets.ui.Subcommand(
@@ -1179,8 +1182,9 @@ class BPDPlugin(BeetsPlugin):
if args:
raise beets.ui.UserError('too many arguments')
password = self.config['password'].get(unicode)
+ volume = self.config['volume'].get(int)
debug = opts.debug or False
- self.start_bpd(lib, host, int(port), password, debug)
+ self.start_bpd(lib, host, int(port), password, volume, debug)
cmd.func = func
return [cmd]
diff --git a/beetsplug/bpm.py b/beetsplug/bpm.py
index 977d59dcc..d895ec5be 100644
--- a/beetsplug/bpm.py
+++ b/beetsplug/bpm.py
@@ -73,15 +73,15 @@ class BPMPlugin(BeetsPlugin):
item = items[0]
if item['bpm']:
- log.info('Found bpm {0}'.format(item['bpm']))
+ log.info(u'Found bpm {0}'.format(item['bpm']))
if not overwrite:
return
- log.info('Press Enter {0} times to the rhythm or Ctrl-D \
-to exit'.format(self.config['max_strokes'].get(int)))
+ log.info(u'Press Enter {0} times to the rhythm or Ctrl-D '
+ u'to exit'.format(self.config['max_strokes'].get(int)))
new_bpm = bpm(self.config['max_strokes'].get(int))
item['bpm'] = int(new_bpm)
if write:
item.try_write()
item.store()
- log.info('Added new bpm {0}'.format(item['bpm']))
+ log.info(u'Added new bpm {0}'.format(item['bpm']))
diff --git a/beetsplug/chroma.py b/beetsplug/chroma.py
index b5b8b1be3..40903dad8 100644
--- a/beetsplug/chroma.py
+++ b/beetsplug/chroma.py
@@ -53,32 +53,33 @@ def acoustid_match(path):
try:
duration, fp = acoustid.fingerprint_file(util.syspath(path))
except acoustid.FingerprintGenerationError as exc:
- log.error('fingerprinting of %s failed: %s' %
- (repr(path), str(exc)))
+ log.error(u'fingerprinting of {0} failed: {1}'
+ .format(util.displayable_path(repr(path)), str(exc)))
return None
_fingerprints[path] = fp
try:
res = acoustid.lookup(API_KEY, fp, duration,
meta='recordings releases')
except acoustid.AcoustidError as exc:
- log.debug('fingerprint matching %s failed: %s' %
- (repr(path), str(exc)))
+ log.debug(u'fingerprint matching {0} failed: {1}'
+ .format(util.displayable_path(repr(path)), str(exc)))
return None
- log.debug('chroma: fingerprinted %s' % repr(path))
+ log.debug(u'chroma: fingerprinted {0}'
+ .format(util.displayable_path(repr(path))))
# Ensure the response is usable and parse it.
if res['status'] != 'ok' or not res.get('results'):
- log.debug('chroma: no match found')
+ log.debug(u'chroma: no match found')
return None
result = res['results'][0] # Best match.
if result['score'] < SCORE_THRESH:
- log.debug('chroma: no results above threshold')
+ log.debug(u'chroma: no results above threshold')
return None
_acoustids[path] = result['id']
# Get recording and releases from the result.
if not result.get('recordings'):
- log.debug('chroma: no recordings found')
+ log.debug(u'chroma: no recordings found')
return None
recording_ids = []
release_ids = []
@@ -87,7 +88,7 @@ def acoustid_match(path):
if 'releases' in recording:
release_ids += [rel['id'] for rel in recording['releases']]
- log.debug('chroma: matched recordings {0}'.format(recording_ids))
+ log.debug(u'chroma: matched recordings {0}'.format(recording_ids))
_matches[path] = recording_ids, release_ids
@@ -141,7 +142,7 @@ class AcoustidPlugin(plugins.BeetsPlugin):
if album:
albums.append(album)
- log.debug('acoustid album candidates: %i' % len(albums))
+ log.debug(u'acoustid album candidates: {0}'.format(len(albums)))
return albums
def item_candidates(self, item, artist, title):
@@ -154,7 +155,7 @@ class AcoustidPlugin(plugins.BeetsPlugin):
track = hooks.track_for_mbid(recording_id)
if track:
tracks.append(track)
- log.debug('acoustid item candidates: {0}'.format(len(tracks)))
+ log.debug(u'acoustid item candidates: {0}'.format(len(tracks)))
return tracks
def commands(self):
@@ -216,7 +217,7 @@ def submit_items(userkey, items, chunksize=64):
def submit_chunk():
"""Submit the current accumulated fingerprint data."""
- log.info('submitting {0} fingerprints'.format(len(data)))
+ log.info(u'submitting {0} fingerprints'.format(len(data)))
try:
acoustid.submit(API_KEY, userkey, data)
except acoustid.AcoustidError as exc:
@@ -233,7 +234,7 @@ def submit_items(userkey, items, chunksize=64):
}
if item.mb_trackid:
item_data['mbid'] = item.mb_trackid
- log.debug('submitting MBID')
+ log.debug(u'submitting MBID')
else:
item_data.update({
'track': item.title,
@@ -244,7 +245,7 @@ def submit_items(userkey, items, chunksize=64):
'trackno': item.track,
'discno': item.disc,
})
- log.debug('submitting textual metadata')
+ log.debug(u'submitting textual metadata')
data.append(item_data)
# If we have enough data, submit a chunk.
@@ -294,6 +295,5 @@ def fingerprint_item(item, write=False):
item.store()
return item.acoustid_fingerprint
except acoustid.FingerprintGenerationError as exc:
- log.info(
- 'fingerprint generation failed: {0}'.format(exc)
- )
+ log.info(u'fingerprint generation failed: {0}'
+ .format(exc))
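
For context, the AcoustID flow used by ``acoustid_match`` above boils down to a local fingerprint plus a web-service lookup. A minimal sketch using the same ``pyacoustid`` calls; the path, API key, and score threshold here are placeholders rather than the plugin's actual values::

    import acoustid

    API_KEY = 'YOUR_ACOUSTID_KEY'   # placeholder; the plugin ships its own key
    SCORE_THRESH = 0.5              # placeholder threshold

    def matching_recordings(path):
        """Return AcoustID recording IDs for the best match of an audio file."""
        duration, fp = acoustid.fingerprint_file(path)
        res = acoustid.lookup(API_KEY, fp, duration, meta='recordings releases')
        if res['status'] != 'ok' or not res.get('results'):
            return []
        best = res['results'][0]
        if best['score'] < SCORE_THRESH:
            return []
        return [rec['id'] for rec in best.get('recordings', [])]

    print(matching_recordings('/path/to/track.mp3'))
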
diff --git a/beetsplug/convert.py b/beetsplug/convert.py
index a86fc017a..90def8400 100644
--- a/beetsplug/convert.py
+++ b/beetsplug/convert.py
@@ -151,8 +151,12 @@ def convert_item(dest_dir, keep_new, path_formats, format, pretend=False):
if keep_new:
original = dest
converted = item.path
+ if should_transcode(item, format):
+ converted = replace_ext(converted, ext)
else:
original = item.path
+ if should_transcode(item, format):
+ dest = replace_ext(dest, ext)
converted = dest
# Ensure that only one thread tries to create directories at a
@@ -181,7 +185,6 @@ def convert_item(dest_dir, keep_new, path_formats, format, pretend=False):
util.move(item.path, original)
if should_transcode(item, format):
- converted = replace_ext(converted, ext)
try:
encode(command, original, converted, pretend)
except subprocess.CalledProcessError:
@@ -232,7 +235,7 @@ def convert_on_import(lib, item):
format = config['convert']['format'].get(unicode).lower()
if should_transcode(item, format):
command, ext = get_format()
- fd, dest = tempfile.mkstemp(ext)
+ fd, dest = tempfile.mkstemp('.' + ext)
os.close(fd)
_temp_files.append(dest) # Delete the transcode later.
try:
@@ -338,7 +341,7 @@ class ConvertPlugin(BeetsPlugin):
help='set the destination directory')
cmd.parser.add_option('-f', '--format', action='store', dest='format',
help='set the destination directory')
- cmd.parser.add_option('-y', '--yes', action='store', dest='yes',
+ cmd.parser.add_option('-y', '--yes', action='store_true', dest='yes',
help='do not ask for confirmation')
cmd.func = convert_func
return [cmd]
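
A note on the ``tempfile.mkstemp('.' + ext)`` fix above: ``mkstemp`` appends its suffix argument verbatim, so passing a bare extension glues it onto the random name without a dot separator. A quick standalone illustration (not beets code)::

    import os
    import tempfile

    # The suffix is appended verbatim: 'mp3' yields names like 'tmpXXXXXXmp3'.
    fd, path = tempfile.mkstemp('mp3')
    os.close(fd)
    os.remove(path)
    print(os.path.basename(path))      # no dot before 'mp3'

    # Prepending the dot, as the patch does, produces a real extension.
    fd, path = tempfile.mkstemp('.' + 'mp3')
    os.close(fd)
    os.remove(path)
    print(os.path.basename(path))      # ends with '.mp3'
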
diff --git a/beetsplug/discogs.py b/beetsplug/discogs.py
index 949888eff..2cb2c0b15 100644
--- a/beetsplug/discogs.py
+++ b/beetsplug/discogs.py
@@ -60,14 +60,14 @@ class DiscogsPlugin(BeetsPlugin):
try:
return self.get_albums(query)
except DiscogsAPIError as e:
- log.debug('Discogs API Error: %s (query: %s' % (e, query))
+ log.debug(u'Discogs API Error: {0} (query: {1})'.format(e, query))
return []
def album_for_id(self, album_id):
"""Fetches an album by its Discogs ID and returns an AlbumInfo object
or None if the album is not found.
"""
- log.debug('Searching discogs for release %s' % str(album_id))
+ log.debug(u'Searching Discogs for release {0}'.format(str(album_id)))
# Discogs-IDs are simple integers. We only look for those at the end
# of an input string as to avoid confusion with other metadata plugins.
# An optional bracket can follow the integer, as this is how discogs
@@ -82,8 +82,8 @@ class DiscogsPlugin(BeetsPlugin):
getattr(result, 'title')
except DiscogsAPIError as e:
if e.message != '404 Not Found':
- log.debug('Discogs API Error: %s (query: %s)'
- % (e, result._uri))
+ log.debug(u'Discogs API Error: {0} (query: {1})'
+ .format(e, result._uri))
return None
return self.get_album_info(result)
@@ -225,7 +225,7 @@ class DiscogsPlugin(BeetsPlugin):
if match:
medium, index = match.groups()
else:
- log.debug('Invalid discogs position: %s' % position)
+ log.debug(u'Invalid Discogs position: {0}'.format(position))
medium = index = None
return medium or None, index or None
diff --git a/beetsplug/duplicates.py b/beetsplug/duplicates.py
index 195cd1d16..fe0f8ac00 100644
--- a/beetsplug/duplicates.py
+++ b/beetsplug/duplicates.py
@@ -56,20 +56,20 @@ def _checksum(item, prog):
key = args[0]
checksum = getattr(item, key, False)
if not checksum:
- log.debug('%s: key %s on item %s not cached: computing checksum',
- PLUGIN, key, displayable_path(item.path))
+ log.debug(u'{0}: key {1} on item {2} not cached: computing checksum'
+ .format(PLUGIN, key, displayable_path(item.path)))
try:
checksum = command_output(args)
setattr(item, key, checksum)
item.store()
- log.debug('%s: computed checksum for %s using %s',
- PLUGIN, item.title, key)
+        log.debug(u'{0}: computed checksum for {1} using {2}'
+ .format(PLUGIN, item.title, key))
except subprocess.CalledProcessError as e:
- log.debug('%s: failed to checksum %s: %s',
- PLUGIN, displayable_path(item.path), e)
+ log.debug(u'{0}: failed to checksum {1}: {2}'
+ .format(PLUGIN, displayable_path(item.path), e))
else:
- log.debug('%s: key %s on item %s cached: not computing checksum',
- PLUGIN, key, displayable_path(item.path))
+ log.debug(u'{0}: key {1} on item {2} cached: not computing checksum'
+ .format(PLUGIN, key, displayable_path(item.path)))
return key, checksum
@@ -86,8 +86,8 @@ def _group_by(objs, keys):
key = '\001'.join(values)
counts[key].append(obj)
else:
- log.debug('%s: all keys %s on item %s are null: skipping',
- PLUGIN, str(keys), displayable_path(obj.path))
+ log.debug(u'{0}: all keys {1} on item {2} are null: skipping'
+ .format(PLUGIN, str(keys), displayable_path(obj.path)))
return counts
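
The grouping strategy behind ``_group_by`` above is simply "bucket objects by the joined values of the chosen keys; buckets with more than one member are duplicate candidates." A simplified, standalone sketch with toy dictionaries instead of beets Items::

    from collections import defaultdict

    def group_by(objs, keys):
        """Bucket dict-like objects by the joined values of `keys`."""
        counts = defaultdict(list)
        for obj in objs:
            values = [obj.get(k) or '' for k in keys]
            if any(values):
                counts['\001'.join(values)].append(obj)
        return counts

    tracks = [
        {'mb_trackid': 'x', 'title': 'Song A'},
        {'mb_trackid': 'x', 'title': 'Song A (remaster)'},
        {'mb_trackid': 'y', 'title': 'Song B'},
    ]
    dupes = dict((key, group) for key, group in
                 group_by(tracks, ['mb_trackid']).items() if len(group) > 1)
    print(dupes)
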
diff --git a/beetsplug/echonest_tempo.py b/beetsplug/echonest_tempo.py
index dea57c879..764594c3e 100644
--- a/beetsplug/echonest_tempo.py
+++ b/beetsplug/echonest_tempo.py
@@ -40,19 +40,19 @@ def fetch_item_tempo(lib, loglevel, item, write):
"""
# Skip if the item already has the tempo field.
if item.bpm:
- log.log(loglevel, u'bpm already present: %s - %s' %
- (item.artist, item.title))
+ log.log(loglevel, u'bpm already present: {0} - {1}'
+ .format(item.artist, item.title))
return
# Fetch tempo.
tempo = get_tempo(item.artist, item.title, item.length)
if not tempo:
- log.log(loglevel, u'tempo not found: %s - %s' %
- (item.artist, item.title))
+ log.log(loglevel, u'tempo not found: {0} - {1}'
+ .format(item.artist, item.title))
return
- log.log(loglevel, u'fetched tempo: %s - %s' %
- (item.artist, item.title))
+ log.log(loglevel, u'fetched tempo: {0} - {1}'
+ .format(item.artist, item.title))
item.bpm = int(tempo)
if write:
item.try_write()
diff --git a/beetsplug/embedart.py b/beetsplug/embedart.py
index 46e10a61d..6e22b7db6 100644
--- a/beetsplug/embedart.py
+++ b/beetsplug/embedart.py
@@ -16,6 +16,9 @@
import os.path
import logging
import imghdr
+import subprocess
+import platform
+from tempfile import NamedTemporaryFile
from beets.plugins import BeetsPlugin
from beets import mediafile
@@ -25,6 +28,7 @@ from beets.util import syspath, normpath, displayable_path
from beets.util.artresizer import ArtResizer
from beets import config
+
log = logging.getLogger('beets')
@@ -36,12 +40,18 @@ class EmbedCoverArtPlugin(BeetsPlugin):
self.config.add({
'maxwidth': 0,
'auto': True,
+ 'compare_threshold': 0,
})
- if self.config['maxwidth'].get(int) and \
- not ArtResizer.shared.local:
+
+ if self.config['maxwidth'].get(int) and not ArtResizer.shared.local:
self.config['maxwidth'] = 0
- log.warn("embedart: ImageMagick or PIL not found; "
- "'maxwidth' option ignored")
+ log.warn(u"embedart: ImageMagick or PIL not found; "
+ u"'maxwidth' option ignored")
+ if self.config['compare_threshold'].get(int) and not \
+ ArtResizer.shared.can_compare:
+ self.config['compare_threshold'] = 0
+ log.warn(u"embedart: ImageMagick 6.8.7 or higher not installed; "
+ u"'compare_threshold' option ignored")
def commands(self):
# Embed command.
@@ -52,12 +62,14 @@ class EmbedCoverArtPlugin(BeetsPlugin):
'-f', '--file', metavar='PATH', help='the image file to embed'
)
maxwidth = config['embedart']['maxwidth'].get(int)
+ compare_threshold = config['embedart']['compare_threshold'].get(int)
def embed_func(lib, opts, args):
if opts.file:
imagepath = normpath(opts.file)
for item in lib.items(decargs(args)):
- embed_item(item, imagepath, maxwidth)
+ embed_item(item, imagepath, maxwidth, None,
+ compare_threshold)
else:
for album in lib.albums(decargs(args)):
embed_album(album, maxwidth)
@@ -72,7 +84,8 @@ class EmbedCoverArtPlugin(BeetsPlugin):
def extract_func(lib, opts, args):
outpath = normpath(opts.outpath or 'cover')
- extract(lib, outpath, decargs(args))
+ item = lib.items(decargs(args)).get()
+ extract(outpath, item)
extract_cmd.func = extract_func
# Clear command.
@@ -94,16 +107,22 @@ def album_imported(lib, album):
embed_album(album, config['embedart']['maxwidth'].get(int))
-def embed_item(item, imagepath, maxwidth=None, itempath=None):
+def embed_item(item, imagepath, maxwidth=None, itempath=None,
+ compare_threshold=0):
"""Embed an image into the item's media file.
"""
+ if compare_threshold:
+ if not check_art_similarity(item, imagepath, compare_threshold):
+            log.warn(u'Image not similar, skipping it.')
+ return
try:
+ log.info(u'embedart: writing %s', displayable_path(imagepath))
item['images'] = [_mediafile_image(imagepath, maxwidth)]
- item.try_write(itempath)
except IOError as exc:
log.error(u'embedart: could not read image file: {0}'.format(exc))
- finally:
+ else:
# We don't want to store the image in the database
+ item.try_write(itempath)
del item['images']
@@ -124,7 +143,43 @@ def embed_album(album, maxwidth=None):
.format(album))
for item in album.items():
- embed_item(item, imagepath, maxwidth)
+ embed_item(item, imagepath, maxwidth, None,
+ config['embedart']['compare_threshold'].get(int))
+
+
+def check_art_similarity(item, imagepath, compare_threshold):
+ """A boolean indicating if an image is similar to embedded item art.
+ """
+ with NamedTemporaryFile(delete=True) as f:
+ art = extract(f.name, item)
+
+ if art:
+ # Converting images to grayscale tends to minimize the weight
+ # of colors in the diff score
+ cmd = 'convert {0} {1} -colorspace gray MIFF:- | ' \
+ 'compare -metric PHASH - null:'.format(syspath(imagepath),
+ syspath(art))
+
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ close_fds=platform.system() != 'Windows',
+ shell=True)
+ stdout, stderr = proc.communicate()
+ if proc.returncode:
+ if proc.returncode != 1:
+                log.warn(u'embedart: IM phashes compare failed for {0}, {1}'
+                         .format(displayable_path(imagepath),
+                                 displayable_path(art)))
+ return
+ phashDiff = float(stderr)
+ else:
+ phashDiff = float(stdout)
+
+ log.info(u'embedart: compare PHASH score is {0}'.format(phashDiff))
+ if phashDiff > compare_threshold:
+ return False
+
+ return True
def _mediafile_image(image_path, maxwidth=None):
@@ -142,10 +197,9 @@ def _mediafile_image(image_path, maxwidth=None):
# 'extractart' command.
-def extract(lib, outpath, query):
- item = lib.items(query).get()
+def extract(outpath, item):
if not item:
- log.error('No item matches query.')
+ log.error(u'No item matches query.')
return
# Extract the art.
@@ -159,29 +213,30 @@ def extract(lib, outpath, query):
art = mf.art
if not art:
- log.error('No album art present in %s - %s.' %
- (item.artist, item.title))
+ log.error(u'No album art present in {0} - {1}.'
+ .format(item.artist, item.title))
return
# Add an extension to the filename.
ext = imghdr.what(None, h=art)
if not ext:
- log.error('Unknown image type.')
+ log.error(u'Unknown image type.')
return
outpath += '.' + ext
- log.info(u'Extracting album art from: {0.artist} - {0.title}\n'
- u'To: {1}'.format(item, displayable_path(outpath)))
+ log.info(u'Extracting album art from: {0.artist} - {0.title} '
+ u'to: {1}'.format(item, displayable_path(outpath)))
with open(syspath(outpath), 'wb') as f:
f.write(art)
+ return outpath
# 'clearart' command.
def clear(lib, query):
- log.info('Clearing album art from items:')
+ log.info(u'Clearing album art from items:')
for item in lib.items(query):
- log.info(u'%s - %s' % (item.artist, item.title))
+ log.info(u'{0} - {1}'.format(item.artist, item.title))
try:
mf = mediafile.MediaFile(syspath(item.path),
config['id3v23'].get(bool))
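
The similarity check added above shells out to ImageMagick; the essential pipeline can be reproduced on its own. A sketch under the assumption that ImageMagick 6.8.7 or newer is installed (as the code above does, it treats an exit status of 1 as "the images differ" and reads the metric from stderr)::

    import platform
    import subprocess

    def phash_distance(image_a, image_b):
        """Return the PHASH distance between two image files, or None."""
        # Same pipeline as check_art_similarity(): grayscale both images,
        # stream them as MIFF, and let `compare` report the PHASH metric.
        cmd = 'convert {0} {1} -colorspace gray MIFF:- | ' \
              'compare -metric PHASH - null:'.format(image_a, image_b)
        proc = subprocess.Popen(cmd, shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                close_fds=platform.system() != 'Windows')
        stdout, stderr = proc.communicate()
        if proc.returncode not in (0, 1):   # 1 just means "images differ"
            return None
        output = (stderr or stdout).decode('utf8', 'ignore').strip()
        try:
            return float(output.split()[0])
        except (IndexError, ValueError):
            return None

    # e.g.: embed only if phash_distance(new_art, current_art) <= threshold
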
diff --git a/beetsplug/ihate.py b/beetsplug/ihate.py
index 3e86759c6..b55554d8a 100644
--- a/beetsplug/ihate.py
+++ b/beetsplug/ihate.py
@@ -17,7 +17,7 @@
import logging
from beets.plugins import BeetsPlugin
from beets.importer import action
-from beets.library import get_query_sort
+from beets.library import parse_query_string
from beets.library import Item
from beets.library import Album
@@ -55,11 +55,10 @@ class IHatePlugin(BeetsPlugin):
"""
if action_patterns:
for query_string in action_patterns:
- query = None
- if task.is_album:
- (query, _) = get_query_sort(query_string, Album)
- else:
- (query, _) = get_query_sort(query_string, Item)
+ query, _ = parse_query_string(
+ query_string,
+ Album if task.is_album else Item,
+ )
if any(query.match(item) for item in task.imported_items()):
return True
return False
@@ -70,7 +69,7 @@ class IHatePlugin(BeetsPlugin):
if task.choice_flag == action.APPLY:
if skip_queries or warn_queries:
- self._log.debug('[ihate] processing your hate')
+ self._log.debug(u'[ihate] processing your hate')
if self.do_i_hate_this(task, skip_queries):
task.choice_flag = action.SKIP
self._log.info(u'[ihate] skipped: {0}'
@@ -80,6 +79,6 @@ class IHatePlugin(BeetsPlugin):
self._log.info(u'[ihate] you maybe hate this: {0}'
.format(summary(task)))
else:
- self._log.debug('[ihate] nothing to do')
+ self._log.debug(u'[ihate] nothing to do')
else:
- self._log.debug('[ihate] user made a decision, nothing to do')
+ self._log.debug(u'[ihate] user made a decision, nothing to do')
diff --git a/beetsplug/importadded.py b/beetsplug/importadded.py
index 539853859..8b4b7c6b5 100644
--- a/beetsplug/importadded.py
+++ b/beetsplug/importadded.py
@@ -44,8 +44,8 @@ def write_item_mtime(item, mtime):
item's file.
"""
if mtime is None:
- log.warn("No mtime to be preserved for item "
- + util.displayable_path(item.path))
+ log.warn(u"No mtime to be preserved for item {0}"
+ .format(util.displayable_path(item.path)))
return
# The file's mtime on disk must be in sync with the item's mtime
@@ -64,10 +64,9 @@ def record_import_mtime(item, source, destination):
mtime = os.stat(util.syspath(source)).st_mtime
item_mtime[destination] = mtime
- log.debug('Recorded mtime %s for item "%s" imported from "%s"',
- mtime,
- util.displayable_path(destination),
- util.displayable_path(source))
+ log.debug(u"Recorded mtime {0} for item '{1}' imported from '{2}'".format(
+ mtime, util.displayable_path(destination),
+ util.displayable_path(source)))
@ImportAddedPlugin.listen('album_imported')
diff --git a/beetsplug/importfeeds.py b/beetsplug/importfeeds.py
index cf367fb5b..0f1cd11c8 100644
--- a/beetsplug/importfeeds.py
+++ b/beetsplug/importfeeds.py
@@ -13,17 +13,20 @@
# included in all copies or substantial portions of the Software.
"""Write paths of imported files in various formats to ease later import in a
-music player.
+music player. Also allow printing the new file locations to stdout in case
+one wants to manually add music to a player by its path.
"""
import datetime
import os
import re
+import logging
from beets.plugins import BeetsPlugin
from beets.util import normpath, syspath, bytestring_path
from beets import config
M3U_DEFAULT_NAME = 'imported.m3u'
+log = logging.getLogger('beets')
class ImportFeedsPlugin(BeetsPlugin):
@@ -126,6 +129,11 @@ def _record_items(lib, basename, items):
if not os.path.exists(syspath(dest)):
os.symlink(syspath(path), syspath(dest))
+ if 'echo' in formats:
+ log.info("Location of imported music:")
+ for path in paths:
+ log.info(" " + path)
+
@ImportFeedsPlugin.listen('library_opened')
def library_opened(lib):
diff --git a/beetsplug/info.py b/beetsplug/info.py
index f7462761a..180f35747 100644
--- a/beetsplug/info.py
+++ b/beetsplug/info.py
@@ -52,7 +52,7 @@ def run(lib, opts, args):
try:
data = data_emitter()
except mediafile.UnreadableFileError as ex:
- log.error('cannot read file: {0}'.format(ex.message))
+ log.error(u'cannot read file: {0}'.format(ex.message))
continue
if opts.summarize:
diff --git a/beetsplug/inline.py b/beetsplug/inline.py
index b0142a934..33c24b777 100644
--- a/beetsplug/inline.py
+++ b/beetsplug/inline.py
@@ -64,7 +64,7 @@ def compile_inline(python_code, album):
try:
func = _compile_func(python_code)
except SyntaxError:
- log.error(u'syntax error in inline field definition:\n%s' %
+        log.error(u'syntax error in inline field definition:\n%s',
traceback.format_exc())
return
else:
@@ -112,14 +112,14 @@ class InlinePlugin(BeetsPlugin):
# Item fields.
for key, view in itertools.chain(config['item_fields'].items(),
config['pathfields'].items()):
- log.debug(u'inline: adding item field %s' % key)
+ log.debug(u'inline: adding item field {0}'.format(key))
func = compile_inline(view.get(unicode), False)
if func is not None:
self.template_fields[key] = func
# Album fields.
for key, view in config['album_fields'].items():
- log.debug(u'inline: adding album field %s' % key)
+ log.debug(u'inline: adding album field {0}'.format(key))
func = compile_inline(view.get(unicode), True)
if func is not None:
self.album_template_fields[key] = func
diff --git a/beetsplug/lastgenre/__init__.py b/beetsplug/lastgenre/__init__.py
index 3a9654c7d..ea0537988 100644
--- a/beetsplug/lastgenre/__init__.py
+++ b/beetsplug/lastgenre/__init__.py
@@ -67,7 +67,7 @@ def _tags_for(obj, min_weight=None):
else:
res = obj.get_top_tags()
except PYLAST_EXCEPTIONS as exc:
- log.debug(u'last.fm error: %s' % unicode(exc))
+ log.debug(u'last.fm error: {0}'.format(exc))
return []
# Filter by weight (optionally).
@@ -369,11 +369,9 @@ class LastGenrePlugin(plugins.BeetsPlugin):
if 'track' in self.sources:
item.genre, src = self._get_genre(item)
item.store()
- log.info(
- u'genre for track {0} - {1} ({2}): {3}'. format(
- item.artist, item.title, src, item.genre
- )
- )
+ log.info(u'genre for track {0} - {1} ({2}): {3}'
+ .format(item.artist, item.title, src,
+ item.genre))
if write:
item.try_write()
@@ -387,22 +385,19 @@ class LastGenrePlugin(plugins.BeetsPlugin):
album = task.album
album.genre, src = self._get_genre(album)
log.debug(u'added last.fm album genre ({0}): {1}'.format(
- src, album.genre
- ))
+ src, album.genre))
album.store()
if 'track' in self.sources:
for item in album.items():
item.genre, src = self._get_genre(item)
log.debug(u'added last.fm item genre ({0}): {1}'.format(
- src, item.genre
- ))
+ src, item.genre))
item.store()
else:
item = task.item
item.genre, src = self._get_genre(item)
log.debug(u'added last.fm item genre ({0}): {1}'.format(
- src, item.genre
- ))
+ src, item.genre))
item.store()
diff --git a/beetsplug/lyrics.py b/beetsplug/lyrics.py
index 186e83d6e..00954e2c3 100644
--- a/beetsplug/lyrics.py
+++ b/beetsplug/lyrics.py
@@ -33,10 +33,10 @@ from beets import config
log = logging.getLogger('beets')
-DIV_RE = re.compile(r'<(/?)div>?')
+DIV_RE = re.compile(r'<(/?)div>?', re.I)
COMMENT_RE = re.compile(r'<!--.*-->', re.S)
TAG_RE = re.compile(r'<[^>]*>')
-BREAK_RE = re.compile(r'<br(\s|/)*>')
+BREAK_RE = re.compile(r'\n?\s*<br([\s|/][^>]*)*>\s*\n?', re.I)
URL_CHARACTERS = {
u'\u2018': u"'",
u'\u2019': u"'",
@@ -111,25 +111,7 @@ def extract_text(html, starttag):
print('no closing tag found!')
return
lyrics = ''.join(parts)
- return strip_cruft(lyrics)
-
-
-def strip_cruft(lyrics, wscollapse=True):
- """Clean up HTML from an extracted lyrics string. For example,
- tags are replaced with newlines.
- """
- lyrics = COMMENT_RE.sub('', lyrics)
- lyrics = unescape(lyrics)
- if wscollapse:
- lyrics = re.sub(r'\s+', ' ', lyrics) # Whitespace collapse.
- lyrics = re.sub(r'<(script).*?\1>(?s)', '', lyrics) # Strip script tags.
-    lyrics = BREAK_RE.sub('\n', lyrics)  # <br> newlines.
- lyrics = re.sub(r'\n +', '\n', lyrics)
- lyrics = re.sub(r' +\n', '\n', lyrics)
- lyrics = TAG_RE.sub('', lyrics) # Strip remaining HTML tags.
- lyrics = lyrics.replace('\r', '\n')
- lyrics = lyrics.strip()
- return lyrics
+ return _scrape_strip_cruft(lyrics, True)
def search_pairs(item):
@@ -140,7 +122,7 @@ def search_pairs(item):
In addition to the artist and title obtained from the `item` the
method tries to strip extra information like paranthesized suffixes
- and featured artists from the strings and add them as caniddates.
+ and featured artists from the strings and add them as candidates.
The method also tries to split multiple titles separated with `/`.
"""
@@ -264,7 +246,7 @@ def slugify(text):
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore')
text = unicode(re.sub('[-\s]+', ' ', text))
except UnicodeDecodeError:
- log.exception("Failing to normalize '%s'" % (text))
+ log.exception(u"Failing to normalize '{0}'".format(text))
return text
@@ -294,36 +276,6 @@ def is_page_candidate(urlLink, urlTitle, title, artist):
return difflib.SequenceMatcher(None, songTitle, title).ratio() >= typoRatio
-def insert_line_feeds(text):
- """Insert newlines before upper-case characters.
- """
- tokensStr = re.split("([a-z][A-Z])", text)
- for idx in range(1, len(tokensStr), 2):
- ltoken = list(tokensStr[idx])
- tokensStr[idx] = ltoken[0] + '\n' + ltoken[1]
- return ''.join(tokensStr)
-
-
-def sanitize_lyrics(text):
- """Clean text, returning raw lyrics as output or None if it happens
- that input text is actually not lyrics content. Clean (x)html tags
- in text, correct layout and syntax...
- """
- text = strip_cruft(text, False)
-
- # Restore \n in input text
- if '\n' not in text:
- text = insert_line_feeds(text)
-
- while text.count('\n\n') > text.count('\n') // 4:
- # Remove first occurrence of \n for each sequence of \n
- text = re.sub(r'\n(\n+)', '\g<1>', text)
-
- text = re.sub(r'\n\n+', '\n\n', text) # keep at most two \n in a row
-
- return text
-
-
def remove_credits(text):
"""Remove first/last line of text if it contains the word 'lyrics'
eg 'Lyrics by songsdatabase.com'
@@ -343,11 +295,11 @@ def is_lyrics(text, artist=None):
"""
if not text:
return
-
badTriggersOcc = []
nbLines = text.count('\n')
if nbLines <= 1:
- log.debug("Ignoring too short lyrics '%s'" % text)
+ log.debug(u"Ignoring too short lyrics '{0}'".format(
+ text.decode('utf8')))
return 0
elif nbLines < 5:
badTriggersOcc.append('too_short')
@@ -356,7 +308,7 @@ def is_lyrics(text, artist=None):
# down
text = remove_credits(text)
- badTriggers = ['lyrics', 'copyright', 'property']
+ badTriggers = ['lyrics', 'copyright', 'property', 'links']
if artist:
badTriggersOcc += [artist]
@@ -365,63 +317,58 @@ def is_lyrics(text, artist=None):
text, re.I))
if badTriggersOcc:
- log.debug('Bad triggers detected: %s' % badTriggersOcc)
+ log.debug(u'Bad triggers detected: {0}'.format(badTriggersOcc))
return len(badTriggersOcc) < 2
-def scrape_lyrics_from_url(url):
+def _scrape_strip_cruft(html, plain_text_out=False):
+ """Clean up HTML
+ """
+ html = unescape(html)
+
+ # Normalize EOL
+ html = html.replace('\r', '\n')
+ html = re.sub(r' +', ' ', html) # Whitespaces collapse.
+    html = BREAK_RE.sub('\n', html)  # <br> eats up surrounding '\n'
+
+ if plain_text_out: # Strip remaining HTML tags
+ html = TAG_RE.sub('', html)
+ html = COMMENT_RE.sub('', html)
+
+ # Strip lines
+ html = '\n'.join([x.strip() for x in html.strip().split('\n')])
+ return html
+
+
+def _scrape_merge_paragraphs(html):
+    return re.sub(r'</p>\s*<p(\s*[^>]*)>', '\n', html)
+
+
+def scrape_lyrics_from_html(html):
"""Scrape lyrics from a URL. If no lyrics can be found, return None
instead.
"""
- from bs4 import BeautifulSoup, Comment
- html = fetch_url(url)
+ from bs4 import SoupStrainer, BeautifulSoup
+
if not html:
return None
- soup = BeautifulSoup(html)
+ def is_text_notcode(string):
+ length = len(string)
+ return (length > 20 and
+                string.count(' ') > length / 25 and
+                (string.find('=') == -1 or string.find(';') == -1))
- for tag in soup.findAll('br'):
- tag.replaceWith('\n')
+ html = _scrape_strip_cruft(html)
+ html = _scrape_merge_paragraphs(html)
- # Remove non relevant html parts
- [s.extract() for s in soup(['head', 'script'])]
- comments = soup.findAll(text=lambda text: isinstance(text, Comment))
- [s.extract() for s in comments]
+ # extract all long text blocks that are not code
+ soup = BeautifulSoup(html, "html.parser",
+ parse_only=SoupStrainer(text=is_text_notcode))
+ soup = sorted(soup.stripped_strings, key=len)[-1]
- try:
- for tag in soup.findAll(True):
- tag.name = 'p' # keep tag contents
-
- except Exception, e:
- log.debug('Error %s when replacing containing marker by p marker' % e,
- exc_info=True)
-
-    # Make better soup from current soup! The previous unclosed <p> sections
- # are now closed. Use str() rather than prettify() as it's more
- # conservative concerning EOL
- soup = BeautifulSoup(str(soup))
-
-    # In case lyrics are nested in no markup but <body>
-    # Insert the whole body in a <p>
- bodyTag = soup.find('body')
- if bodyTag:
- pTag = soup.new_tag("p")
- bodyTag.parent.insert(0, pTag)
- pTag.insert(0, bodyTag)
-
- tagTokens = []
-
- for tag in soup.findAll('p'):
- soup2 = BeautifulSoup(str(tag))
-        # Extract all text of <p> section.
- tagTokens += soup2.findAll(text=True)
-
- if tagTokens:
- # Lyrics are expected to be the longest paragraph
- tagTokens = sorted(tagTokens, key=len, reverse=True)
- soup = BeautifulSoup(tagTokens[0])
- return unescape(tagTokens[0].strip("\n\r: "))
+ return soup
def fetch_google(artist, title):
@@ -437,7 +384,7 @@ def fetch_google(artist, title):
data = json.load(data)
if 'error' in data:
reason = data['error']['errors'][0]['reason']
- log.debug(u'google lyrics backend error: %s' % reason)
+ log.debug(u'google lyrics backend error: {0}'.format(reason))
return
if 'items' in data.keys():
@@ -446,14 +393,14 @@ def fetch_google(artist, title):
urlTitle = item['title']
if not is_page_candidate(urlLink, urlTitle, title, artist):
continue
- lyrics = scrape_lyrics_from_url(urlLink)
+
+ html = fetch_url(urlLink)
+ lyrics = scrape_lyrics_from_html(html)
if not lyrics:
continue
- lyrics = sanitize_lyrics(lyrics)
-
if is_lyrics(lyrics, artist):
- log.debug(u'got lyrics from %s' % item['displayLink'])
+ log.debug(u'got lyrics from {0}'.format(item['displayLink']))
return lyrics
@@ -514,8 +461,8 @@ class LyricsPlugin(BeetsPlugin):
"""
# Skip if the item already has lyrics.
if not force and item.lyrics:
- log.log(loglevel, u'lyrics already present: %s - %s' %
- (item.artist, item.title))
+ log.log(loglevel, u'lyrics already present: {0} - {1}'
+ .format(item.artist, item.title))
return
lyrics = None
@@ -527,11 +474,11 @@ class LyricsPlugin(BeetsPlugin):
lyrics = u"\n\n---\n\n".join([l for l in lyrics if l])
if lyrics:
- log.log(loglevel, u'fetched lyrics: %s - %s' %
- (item.artist, item.title))
+ log.log(loglevel, u'fetched lyrics: {0} - {1}'
+ .format(item.artist, item.title))
else:
- log.log(loglevel, u'lyrics not found: %s - %s' %
- (item.artist, item.title))
+ log.log(loglevel, u'lyrics not found: {0} - {1}'
+ .format(item.artist, item.title))
fallback = self.config['fallback'].get()
if fallback:
lyrics = fallback
@@ -553,7 +500,6 @@ class LyricsPlugin(BeetsPlugin):
if lyrics:
if isinstance(lyrics, str):
lyrics = lyrics.decode('utf8', 'ignore')
- log.debug(u'got lyrics from backend: {0}'.format(
- backend.__name__
- ))
+ log.debug(u'got lyrics from backend: {0}'
+ .format(backend.__name__))
return lyrics.strip()
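
Putting the new scraping helpers together, the per-page flow used by ``fetch_google`` above is roughly: fetch the page, extract the longest plausible text block, then sanity-check it. A sketch (requires BeautifulSoup; the URL and artist are placeholders)::

    from beetsplug import lyrics as lyrics_plugin

    def lyrics_from_page(url, artist):
        """Fetch one candidate page and return plausible lyrics or None."""
        html = lyrics_plugin.fetch_url(url)                  # raw markup
        if not html:
            return None
        text = lyrics_plugin.scrape_lyrics_from_html(html)   # longest block
        if text and lyrics_plugin.is_lyrics(text, artist):   # sanity checks
            return text
        return None

    result = lyrics_from_page('http://example.com/some-song-lyrics',
                              u'Some Artist')
    print(result)
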
diff --git a/beetsplug/missing.py b/beetsplug/missing.py
index 48ac11b67..addc0ae28 100644
--- a/beetsplug/missing.py
+++ b/beetsplug/missing.py
@@ -43,7 +43,7 @@ def _missing(album):
for track_info in getattr(album_info, 'tracks', []):
if track_info.track_id not in item_mbids:
item = _item(track_info, album_info, album.id)
- log.debug('{0}: track {1} in album {2}'
+ log.debug(u'{0}: track {1} in album {2}'
.format(PLUGIN,
track_info.track_id,
album_info.album_id))
diff --git a/beetsplug/mpdstats.py b/beetsplug/mpdstats.py
index f03e284e3..c198445dc 100644
--- a/beetsplug/mpdstats.py
+++ b/beetsplug/mpdstats.py
@@ -313,7 +313,7 @@ class MPDStatsPlugin(plugins.BeetsPlugin):
item_types = {
'play_count': types.INTEGER,
'skip_count': types.INTEGER,
- 'last_played': library.Date(),
+ 'last_played': library.DateType(),
'rating': types.FLOAT,
}
diff --git a/beetsplug/play.py b/beetsplug/play.py
index fb4167124..38ef379dd 100644
--- a/beetsplug/play.py
+++ b/beetsplug/play.py
@@ -105,6 +105,8 @@ def play_music(lib, opts, args):
ui.print_(u'Playing {0} {1}.'.format(len(selection), item_type))
+ util.remove(m3u.name)
+
class PlayPlugin(BeetsPlugin):
diff --git a/beetsplug/replaygain.py b/beetsplug/replaygain.py
index b93e13b33..4857566d9 100644
--- a/beetsplug/replaygain.py
+++ b/beetsplug/replaygain.py
@@ -135,7 +135,7 @@ class CommandBackend(Backend):
supported_items = filter(self.format_supported, album.items())
if len(supported_items) != len(album.items()):
- log.debug('replaygain: tracks are of unsupported format')
+ log.debug(u'replaygain: tracks are of unsupported format')
return AlbumGain(None, [])
output = self.compute_gain(supported_items, True)
@@ -198,6 +198,9 @@ class CommandBackend(Backend):
out = []
for line in text.split('\n')[1:num_lines + 1]:
parts = line.split('\t')
+ if len(parts) != 6 or parts[0] == 'File':
+ log.debug(u'replaygain: bad tool output: {0}'.format(text))
+ raise ReplayGainError('mp3gain failed')
d = {
'file': parts[0],
'mp3gain': int(parts[1]),
@@ -577,12 +580,12 @@ class ReplayGainPlugin(BeetsPlugin):
in the item, nothing is done.
"""
if not self.track_requires_gain(item):
- log.info(u'Skipping track {0} - {1}'.format(item.artist,
- item.title))
+ log.info(u'Skipping track {0} - {1}'
+ .format(item.artist, item.title))
return
- log.info(u'analyzing {0} - {1}'.format(item.artist,
- item.title))
+ log.info(u'analyzing {0} - {1}'
+ .format(item.artist, item.title))
try:
track_gains = self.backend_instance.compute_track_gain([item])
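
The new validation above guards against unparseable ``mp3gain`` output. A standalone sketch of the same check; the sample table below is only illustrative, not real tool output::

    SAMPLE = (   # made-up table in the shape the parser expects
        "File\tMP3 gain\tdB gain\tMax Amplitude\tMax global\tMin global\n"
        "/music/a.mp3\t3\t5.1\t15000.0\t210\t120\n"
    )

    def parse_mp3gain(text):
        results = []
        for line in text.strip().split('\n')[1:]:
            parts = line.split('\t')
            # Same sanity check as above: six tab-separated fields and no
            # stray header line, otherwise treat the output as unusable.
            if len(parts) != 6 or parts[0] == 'File':
                raise ValueError('bad mp3gain output: {0!r}'.format(line))
            results.append({'file': parts[0], 'mp3gain': int(parts[1])})
        return results

    print(parse_mp3gain(SAMPLE))
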
diff --git a/beetsplug/rewrite.py b/beetsplug/rewrite.py
index 44fd3753e..55b705492 100644
--- a/beetsplug/rewrite.py
+++ b/beetsplug/rewrite.py
@@ -59,7 +59,7 @@ class RewritePlugin(BeetsPlugin):
if fieldname not in library.Item._fields:
raise ui.UserError("invalid field name (%s) in rewriter" %
fieldname)
- log.debug(u'adding template field %s' % key)
+ log.debug(u'adding template field {0}'.format(key))
pattern = re.compile(pattern.lower())
rules[fieldname].append((pattern, value))
if fieldname == 'artist':
diff --git a/beetsplug/scrub.py b/beetsplug/scrub.py
index af1a77370..c53c27590 100644
--- a/beetsplug/scrub.py
+++ b/beetsplug/scrub.py
@@ -64,7 +64,8 @@ class ScrubPlugin(BeetsPlugin):
# Walk through matching files and remove tags.
for item in lib.items(ui.decargs(args)):
- log.info(u'scrubbing: %s' % util.displayable_path(item.path))
+ log.info(u'scrubbing: {0}'.format(
+ util.displayable_path(item.path)))
# Get album art if we need to restore it.
if opts.write:
@@ -80,7 +81,7 @@ class ScrubPlugin(BeetsPlugin):
log.debug(u'writing new tags after scrub')
item.try_write()
if art:
- log.info('restoring art')
+ log.info(u'restoring art')
mf = mediafile.MediaFile(item.path)
mf.art = art
mf.save()
@@ -132,8 +133,7 @@ def _scrub(path):
f.save()
except IOError as exc:
log.error(u'could not scrub {0}: {1}'.format(
- util.displayable_path(path),
- exc,
+ util.displayable_path(path), exc,
))
@@ -141,5 +141,5 @@ def _scrub(path):
@ScrubPlugin.listen('write')
def write_item(path):
if not scrubbing and config['scrub']['auto']:
- log.debug(u'auto-scrubbing %s' % util.displayable_path(path))
+ log.debug(u'auto-scrubbing {0}'.format(util.displayable_path(path)))
_scrub(path)
diff --git a/beetsplug/smartplaylist.py b/beetsplug/smartplaylist.py
index 6beb0ad59..653a59d30 100644
--- a/beetsplug/smartplaylist.py
+++ b/beetsplug/smartplaylist.py
@@ -42,7 +42,7 @@ def _items_for_query(lib, playlist, album=False):
query_strings = [query_strings]
model = library.Album if album else library.Item
query = dbcore.OrQuery(
- [library.get_query_sort(q, model)[0] for q in query_strings]
+ [library.parse_query_string(q, model)[0] for q in query_strings]
)
# Execute query, depending on type.
diff --git a/beetsplug/spotify.py b/beetsplug/spotify.py
index 59fe687a6..a31754150 100644
--- a/beetsplug/spotify.py
+++ b/beetsplug/spotify.py
@@ -63,7 +63,8 @@ class SpotifyPlugin(BeetsPlugin):
self.config['show_failures'].set(True)
if self.config['mode'].get() not in ['list', 'open']:
- log.warn(self.config['mode'].get() + " is not a valid mode")
+ log.warn(u'{0} is not a valid mode'
+ .format(self.config['mode'].get()))
return False
self.opts = opts
@@ -77,10 +78,10 @@ class SpotifyPlugin(BeetsPlugin):
items = lib.items(query)
if not items:
- log.debug("Your beets query returned no items, skipping spotify")
+ log.debug(u'Your beets query returned no items, skipping spotify')
return
- log.info("Processing " + str(len(items)) + " tracks...")
+ log.info(u'Processing {0} tracks...'.format(len(items)))
for item in items:
@@ -112,7 +113,8 @@ class SpotifyPlugin(BeetsPlugin):
try:
r.raise_for_status()
except HTTPError as e:
- log.debug("URL returned a " + e.response.status_code + "error")
+ log.debug(u'URL returned a {0} error'
+ .format(e.response.status_code))
failures.append(search_url)
continue
@@ -128,35 +130,33 @@ class SpotifyPlugin(BeetsPlugin):
# Simplest, take the first result
chosen_result = None
if len(r_data) == 1 or self.config['tiebreak'].get() == "first":
- log.debug("Spotify track(s) found, count: " + str(len(r_data)))
+ log.debug(u'Spotify track(s) found, count: {0}'
+ .format(len(r_data)))
chosen_result = r_data[0]
elif len(r_data) > 1:
# Use the popularity filter
- log.debug(
- "Most popular track chosen, count: " + str(len(r_data))
- )
+ log.debug(u'Most popular track chosen, count: {0}'
+ .format(len(r_data)))
chosen_result = max(r_data, key=lambda x: x['popularity'])
if chosen_result:
results.append(chosen_result)
else:
- log.debug("No spotify track found: " + search_url)
+ log.debug(u'No spotify track found: {0}'.format(search_url))
failures.append(search_url)
failure_count = len(failures)
if failure_count > 0:
if self.config['show_failures'].get():
- log.info("{0} track(s) did not match a Spotify ID:".format(
- failure_count
- ))
+ log.info(u'{0} track(s) did not match a Spotify ID:'
+ .format(failure_count))
for track in failures:
- log.info("track:" + track)
- log.info("")
+ log.info(u'track: {0}'.format(track))
+ log.info(u'')
else:
- log.warn(
- str(failure_count) + " track(s) did not match "
- "a Spotify ID; use --show-failures to display\n"
- )
+ log.warn(u'{0} track(s) did not match a Spotify ID;\n'
+ u'use --show-failures to display'
+ .format(failure_count))
return results
@@ -164,7 +164,7 @@ class SpotifyPlugin(BeetsPlugin):
if results:
ids = map(lambda x: x['id'], results)
if self.config['mode'].get() == "open":
- log.info("Attempting to open Spotify with playlist")
+ log.info(u'Attempting to open Spotify with playlist')
spotify_url = self.playlist_partial + ",".join(ids)
webbrowser.open(spotify_url)
@@ -172,4 +172,4 @@ class SpotifyPlugin(BeetsPlugin):
for item in ids:
print(unicode.encode(self.open_url + item))
else:
- log.warn("No Spotify tracks found from beets query")
+ log.warn(u'No Spotify tracks found from beets query')
diff --git a/beetsplug/zero.py b/beetsplug/zero.py
index 30f287aea..8c65fe855 100644
--- a/beetsplug/zero.py
+++ b/beetsplug/zero.py
@@ -78,19 +78,18 @@ class ZeroPlugin(BeetsPlugin):
return True
return False
- def write_event(self, item):
+ def write_event(self, item, path, tags):
"""Listen for write event."""
if not self.patterns:
log.warn(u'[zero] no fields, nothing to do')
return
for field, patterns in self.patterns.items():
- try:
- value = getattr(item, field)
- except AttributeError:
+ if field not in tags:
log.error(u'[zero] no such field: {0}'.format(field))
continue
+ value = tags[field]
if self.match_patterns(value, patterns):
log.debug(u'[zero] {0}: {1} -> None'.format(field, value))
- setattr(item, field, None)
+ tags[field] = None
diff --git a/docs/changelog.rst b/docs/changelog.rst
index db1b82b7f..be6f77bf5 100644
--- a/docs/changelog.rst
+++ b/docs/changelog.rst
@@ -1,17 +1,63 @@
Changelog
=========
-1.3.8 (in development)
+1.3.9 (in development)
----------------------
-This release adds **sorting** to beets queries. See :ref:`query-sort`.
-
Features:
-* :doc:`/plugins/info`: Files can be specified through library queries
- and the ``--library`` option prints library fields instead of tags.
- Tags and library fields for multiple files can be summarized with the
- ``--summarize`` option.
+* :doc:`/plugins/embedart`: You can now automatically check that new art looks
+ similar to existing art---ensuring that you only get a better "version" of
+ the art you already have. See :ref:`image-similarity-check`.
+* Re-imports of your existing music (see :ref:`reimport`) now preserve its
+ added date and flexible attributes. Thanks to Stig Inge Lea Bjørnsen.
+* :doc:`/plugins/bpd`: Add a new configuration option for setting the default
+ volume. Thanks to IndiGit.
+
+Fixes:
+
+* :doc:`/plugins/convert`: Does not crash when embedding cover art
+ fails.
+* :doc:`/plugins/mpdstats`: Fix an error on start (introduced in the previous
+ version). Thanks to Zach Denton.
+* :doc:`/plugins/convert`: The ``--yes`` command-line flag no longer expects
+ an argument.
+* :doc:`/plugins/play`: Remove the temporary .m3u file after sending it to
+ the player.
+* The importer no longer tries to highlight partial differences in numeric
+ quantities (track numbers and durations), which was often confusing.
+* Date-based queries that are malformed (not parse-able) no longer crash
+ beets and instead fail silently.
+
+
+1.3.8 (September 17, 2014)
+--------------------------
+
+This release has two big new chunks of functionality. Queries now support
+**sorting** and user-defined fields can now have **types**.
+
+If you want to see all your songs in reverse chronological order, just type
+``beet list year-``. It couldn't be easier. For details, see
+:ref:`query-sort`.
+
+Flexible field types mean that some functionality that previously only
+worked for built-in fields, like range queries, can now work with plugin- and
+user-defined fields too. For starters, the :doc:`/plugins/echonest` and
+:doc:`/plugins/mpdstats` now mark the types of the fields they provide---so
+you can now say, for example, ``beet ls liveness:0.5..1.5`` for the Echo Nest
+"liveness" attribute. The :doc:`/plugins/types` makes it easy to specify field
+types in your config file.
+
+One upgrade note: if you use the :doc:`/plugins/discogs`, you will need to
+upgrade the Discogs client library to use this version. Just type
+``pip install -U discogs-client``.
+
+Other new features:
+
+* :doc:`/plugins/info`: Target files can now be specified through library
+ queries (in addition to filenames). The ``--library`` option prints library
+ fields instead of tags. Multiple files can be summarized together with the
+ new ``--summarize`` option.
* :doc:`/plugins/mbcollection`: A new option lets you automatically update
your collection on import. Thanks to Olin Gay.
* :doc:`/plugins/convert`: A new ``never_convert_lossy_files`` option can
@@ -19,7 +65,7 @@ Features:
* :doc:`/plugins/convert`: A new ``--yes`` command-line flag skips the
confirmation.
-Fixes:
+Still more fixes and little improvements:
* Invalid state files don't crash the importer.
* :doc:`/plugins/lyrics`: Only strip featured artists and
@@ -49,6 +95,17 @@ Fixes:
* The ``--version`` flag now works as an alias for the ``version`` command.
* :doc:`/plugins/lastgenre`: Remove some unhelpful genres from the default
whitelist. Thanks to gwern.
+* :doc:`/plugins/importfeeds`: A new ``echo`` output mode prints files' paths
+ to standard error. Thanks to robotanarchy.
+* :doc:`/plugins/replaygain`: Restore some error handling when ``mp3gain``
+ output cannot be parsed. The verbose log now contains the bad tool output in
+ this case.
+* :doc:`/plugins/convert`: Fix filename extensions when converting
+ automatically.
+* The ``write`` plugin event allows plugins to change the tags that are
+ written to a media file.
+* :doc:`/plugins/zero`: Do not delete database values; only media file
+ tags are affected.
.. _discogs_client: https://github.com/discogs/discogs_client
diff --git a/docs/conf.py b/docs/conf.py
index 291685116..e5c81a4c5 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -12,7 +12,7 @@ project = u'beets'
copyright = u'2012, Adrian Sampson'
version = '1.3'
-release = '1.3.8'
+release = '1.3.9'
pygments_style = 'sphinx'
diff --git a/docs/dev/plugins.rst b/docs/dev/plugins.rst
index c585c9001..768f982d8 100644
--- a/docs/dev/plugins.rst
+++ b/docs/dev/plugins.rst
@@ -143,11 +143,14 @@ currently available are:
or album's part) is removed from the library (even when its file is not
deleted from disk).
-* *write*: called with an ``Item`` object just before a file's metadata is
- written to disk (i.e., just before the file on disk is opened). Event
- handlers may raise a ``library.FileOperationError`` exception to abort
- the write operation. Beets will catch that exception, print an error
- message and continue.
+* *write*: called with an ``Item`` object, a ``path``, and a ``tags``
+ dictionary just before a file's metadata is written to disk (i.e.,
+ just before the file on disk is opened). Event handlers may change
+ the ``tags`` dictionary to customize the tags that are written to the
+ media file. Event handlers may also raise a
+ ``library.FileOperationError`` exception to abort the write
+ operation. Beets will catch that exception, print an error message
+ and continue.
* *after_write*: called with an ``Item`` object after a file's metadata is
written to disk (i.e., just after the file on disk is closed).
@@ -403,8 +406,9 @@ Flexible Field Types
^^^^^^^^^^^^^^^^^^^^
If your plugin uses flexible fields to store numbers or other
-non-string values you can specify the types of those fields. A rating
-plugin, for example might look like this. ::
+non-string values, you can specify the types of those fields. A rating
+plugin, for example, might want to declare that the ``rating`` field
+should have an integer type::
from beets.plugins import BeetsPlugin
from beets.dbcore import types
@@ -416,18 +420,18 @@ plugin, for example might look like this. ::
def album_types(self):
return {'rating': types.INTEGER}
-A plugin may define two attributes, `item_types` and `album_types`.
+A plugin may define two attributes: `item_types` and `album_types`.
Each of those attributes is a dictionary mapping a flexible field name
to a type instance. You can find the built-in types in the
`beets.dbcore.types` and `beets.library` modules or implement your own
-ones.
+type by inheriting from the `Type` class.
-Specifying types has the following advantages.
+Specifying types has several advantages:
-* The flexible field accessors ``item['my_field']`` return the
- specified type instead of a string.
+* Code that accesses the field like ``item['my_field']`` gets the right
+ type (instead of just a string).
-* Users can use advanced queries (like :ref:`ranges `)
+* You can use advanced queries (like :ref:`ranges `)
from the command line.
-* User input for flexible fields may be validated.
+* User input for flexible fields may be validated and converted.
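
A minimal sketch tying the two features documented above together, assuming the listener-registration style used elsewhere in this patch; the ``genre`` rewrite is purely illustrative::

    from beets.plugins import BeetsPlugin
    from beets.dbcore import types

    class ExamplePlugin(BeetsPlugin):
        # A typed flexible field, like mpdstats' `last_played` above.
        item_types = {'rating': types.INTEGER}

    @ExamplePlugin.listen('write')
    def on_write(item, path, tags):
        # Handlers may edit `tags` to change what is written to the media
        # file; the database copy of the item is left untouched.
        if tags.get('genre') == 'Unknown':
            tags['genre'] = None
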
diff --git a/docs/plugins/bpd.rst b/docs/plugins/bpd.rst
index 81736ae30..919a45ec3 100644
--- a/docs/plugins/bpd.rst
+++ b/docs/plugins/bpd.rst
@@ -71,12 +71,14 @@ on your headless server box. Rad!
To configure the BPD server, add a ``bpd:`` section to your ``config.yaml``
file. The configuration values, which are pretty self-explanatory, are ``host``,
-``port``, and ``password``. Here's an example::
+``port``, ``password`` and ``volume``. The volume option sets the initial
+volume (in percent, default: 100). Here's an example::
bpd:
host: 127.0.0.1
port: 6600
password: seekrit
+ volume: 100
Implementation Notes
--------------------
diff --git a/docs/plugins/embedart.rst b/docs/plugins/embedart.rst
index 1200071cf..cb2047c3b 100644
--- a/docs/plugins/embedart.rst
+++ b/docs/plugins/embedart.rst
@@ -19,6 +19,26 @@ embedded after each album is added to the library.
This behavior can be disabled with the ``auto`` config option (see below).
+.. _image-similarity-check:
+
+Image Similarity
+''''''''''''''''
+
+When importing a lot of files with the ``auto`` option, one may be reluctant to
+overwrite existing embedded art for all of them.
+
+You can tell beets to avoid embedding images that are too different from the
+existing art. This works by computing the perceptual hashes (`PHASH`_) of the
+two images and checking that the difference between them does not exceed a
+threshold. You can set the threshold with the ``compare_threshold`` option.
+
+A threshold of 0 (the default) disables similarity checking and always embeds
+new images. Set the threshold to another number---we recommend between 10 and
+100---to adjust the sensitivity of the comparison. The smaller the threshold
+number, the more similar the images must be.
+
+This feature requires `ImageMagick`_.
+
Manually Embedding and Extracting Art
-------------------------------------
@@ -53,7 +73,13 @@ before embedding them (the original image file is not altered). The resize
operation reduces image width to ``maxwidth`` pixels. The height is recomputed
so that the aspect ratio is preserved. `PIL`_ or `ImageMagick`_ is required to
use the ``maxwidth`` config option. See also :ref:`image-resizing` for further
-caveats about image resizing.
+so that the aspect ratio is preserved.
+
+The ``compare_threshold`` option defines how similar candidate art must be to
+the already-embedded art for it to be written to the file (see
+:ref:`image-similarity-check`). The default is 0 (no similarity check).
+Requires `ImageMagick`_.
.. _PIL: http://www.pythonware.com/products/pil/
.. _ImageMagick: http://www.imagemagick.org/
+.. _PHASH: http://www.fmwconcepts.com/misc_tests/perceptual_hash_test_results_510/
diff --git a/docs/plugins/importfeeds.rst b/docs/plugins/importfeeds.rst
index dcc2ae653..3d8155e26 100644
--- a/docs/plugins/importfeeds.rst
+++ b/docs/plugins/importfeeds.rst
@@ -18,12 +18,13 @@ root of your music library.
The ``absolute_path`` configuration option can be set to use absolute paths
instead of relative paths. Some applications may need this to work properly.
-Three different types of outputs coexist, specify the ones you want to use by
-setting the ``formats`` parameter:
+Four different types of outputs are available. Specify the ones you want to
+use by setting the ``formats`` parameter:
- ``m3u``: catalog the imports in a centralized playlist. By default, the playlist is named ``imported.m3u``. To use a different file, just set the ``m3u_name`` parameter inside the ``importfeeds`` config section.
- ``m3u_multi``: create a new playlist for each import (uniquely named by appending the date and track/album name).
- ``link``: create a symlink for each imported item. This is the recommended setting to propagate beets imports to your iTunes library: just drag and drop the ``dir`` folder on the iTunes dock icon.
+- ``echo``: do not write a playlist file at all, but echo a list of new file paths to the terminal.
Here's an example configuration for this plugin::
diff --git a/docs/plugins/types.rst b/docs/plugins/types.rst
index 41419d758..978654e8d 100644
--- a/docs/plugins/types.rst
+++ b/docs/plugins/types.rst
@@ -4,14 +4,14 @@ Types Plugin
The ``types`` plugin lets you declare types for attributes you use in your
library. For example, you can declare that a ``rating`` field is numeric so
that you can query it with ranges---which isn't possible when the field is
-considered a string, which is the default.
+considered a string (the default).
Enable the plugin as described in :doc:`/plugins/index` and then add a
``types`` section to your :doc:`configuration file `. The
configuration section should map field name to one of ``int``, ``float``,
``bool``, or ``date``.
-Here's an example:
+Here's an example::
types:
rating: int
diff --git a/docs/reference/cli.rst b/docs/reference/cli.rst
index d8dff4c33..96c5e3264 100644
--- a/docs/reference/cli.rst
+++ b/docs/reference/cli.rst
@@ -132,6 +132,8 @@ Optional command flags:
.. only:: html
+ .. _reimport:
+
Reimporting
^^^^^^^^^^^
diff --git a/docs/reference/config.rst b/docs/reference/config.rst
index 0d3d4b644..75592e499 100644
--- a/docs/reference/config.rst
+++ b/docs/reference/config.rst
@@ -193,18 +193,16 @@ option overrides this setting.
sort_item
~~~~~~~~~
-Sort order to use when listing *individual items* with the :ref:`list-cmd`
-command and other commands that need to print out items. Defaults to
-``smartartist+``. Any command-line sort order overrides this setting.
+Default sort order to use when fetching items from the database. Defaults to
+``artist+ album+ disc+ track+``. Explicit sort orders override this default.
.. _sort_album:
sort_album
~~~~~~~~~~
-Sort order to use when listing *albums* with the :ref:`list-cmd`
-command. Defaults to ``smartartist+``. Any command-line sort order overrides
-this setting.
+Default sort order to use when fetching albums from the database. Defaults to
+``albumartist+ album+``. Explicit sort orders override this default.
.. _original_date:
diff --git a/docs/reference/query.rst b/docs/reference/query.rst
index b85a03962..7dc79461a 100644
--- a/docs/reference/query.rst
+++ b/docs/reference/query.rst
@@ -190,25 +190,23 @@ ones you've already added to your beets library.
Sort Order
----------
-You can also specify the order used when outputting the results. Of course, this
-is only useful when displaying the result, for example with the ``list``
-command, and is useless when the query is used as a filter for an command. Use
-the name of the `field` you want to sort on, followed by a ``+`` or ``-`` sign
-if you want ascending or descending sort. For example this command::
+Queries can specify a sort order. Use the name of the `field` you want to sort
+on, followed by a ``+`` or ``-`` sign to indicate ascending or descending
+sort. For example, this command::
$ beet list -a year+
-will list all albums in chronological order.
-
-There is a special ``smartartist`` sort that uses sort-specific field (
-``artist_sort`` for items and ``albumartist_sort`` for albums) but falls back to
-standard artist fields if these are empty. When no sort order is specified,
-``smartartist+`` is used (but this is configurable).
-
-You can also specify several sort orders, which will be used in the same order at
-which they appear in your query::
+will list all albums in chronological order. You can also specify several sort
+orders, which will be used in the same order as they appear in your query::
$ beet list -a genre+ year+
This command will sort all albums by genre and, in each genre, in chronological
order.
+
+The ``artist`` and ``albumartist`` keys are special: they attempt to use their
+corresponding ``artist_sort`` and ``albumartist_sort`` fields for sorting
+transparently (but fall back to the ordinary fields when those are empty).
+
+You can set the default sorting behavior with the :ref:`sort_item` and
+:ref:`sort_album` configuration options.
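
Internally, the same sort syntax is available to plugins through ``parse_query_string`` (the function that the ihate and smartplaylist changes above switch to), which returns both a query and a sort object. A small sketch, assuming beets is importable::

    from beets import library

    # Returns (query, sort); the trailing `year+` part becomes the sort.
    query, sort = library.parse_query_string(u'genre:jazz year+', library.Item)
    print(type(query).__name__, type(sort).__name__)
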
diff --git a/extra/release.py b/extra/release.py
index 0a1730e4a..3f2f9ce6b 100755
--- a/extra/release.py
+++ b/extra/release.py
@@ -55,7 +55,7 @@ VERSION_LOCS = [
os.path.join(BASE, 'setup.py'),
[
(
- r'version\s*=\s*[\'"]([0-9\.]+)[\'"]',
+ r'\s*version\s*=\s*[\'"]([0-9\.]+)[\'"]',
" version='{version}',",
)
]
@@ -77,6 +77,7 @@ def bump_version(version):
# Read and transform the file.
out_lines = []
with open(filename) as f:
+ found = False
for line in f:
for pattern, template in locations:
match = re.match(pattern, line)
@@ -96,12 +97,16 @@ def bump_version(version):
minor=minor,
) + '\n')
+ found = True
break
else:
# Normal line.
out_lines.append(line)
+ if not found:
+ print("No pattern found in {}".format(filename))
+
# Write the file back.
with open(filename, 'w') as f:
f.write(''.join(out_lines))
@@ -180,6 +185,9 @@ def changelog_as_markdown():
# Other backslashes with verbatim ranges.
rst = re.sub(r'(\s)`([^`]+)`([^_])', r'\1``\2``\3', rst)
+ # Command links with command names.
+ rst = re.sub(r':ref:`(\w+)-cmd`', r'``\1``', rst)
+
return rst2md(rst)
diff --git a/setup.py b/setup.py
index 04e8db1b3..180374d0c 100755
--- a/setup.py
+++ b/setup.py
@@ -45,7 +45,7 @@ if 'sdist' in sys.argv:
setup(
name='beets',
- version='1.3.8',
+ version='1.3.9',
description='music tagger and library organizer',
author='Adrian Sampson',
author_email='adrian@radbox.org',
diff --git a/test/helper.py b/test/helper.py
index 6beac5d5a..875c99643 100644
--- a/test/helper.py
+++ b/test/helper.py
@@ -35,6 +35,7 @@ import os
import os.path
import shutil
import subprocess
+import logging
from tempfile import mkdtemp, mkstemp
from contextlib import contextmanager
from StringIO import StringIO
@@ -52,6 +53,27 @@ from beets.mediafile import MediaFile
import _common
+class LogCapture(logging.Handler):
+
+ def __init__(self):
+ logging.Handler.__init__(self)
+ self.messages = []
+
+ def emit(self, record):
+ self.messages.append(str(record.msg))
+
+
+@contextmanager
+def capture_log(logger='beets'):
+ capture = LogCapture()
+ log = logging.getLogger(logger)
+ log.addHandler(capture)
+ try:
+ yield capture.messages
+ finally:
+ log.removeHandler(capture)
+
+
@contextmanager
def control_stdin(input=None):
"""Sends ``input`` to stdin.
diff --git a/test/lyrics_sources.py b/test/lyrics_sources.py
deleted file mode 100644
index 58be4eb4d..000000000
--- a/test/lyrics_sources.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# This file is part of beets.
-# Copyright 2014, Fabrice Laporte.
-#
-# Permission is hereby granted, free of charge, to any person obtaining
-# a copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be
-# included in all copies or substantial portions of the Software.
-
-"""Tests for the 'lyrics' plugin"""
-
-import os
-import logging
-import _common
-from _common import unittest
-from beetsplug import lyrics
-from beets import config
-from beets.util import confit
-from bs4 import BeautifulSoup
-
-log = logging.getLogger('beets')
-LYRICS_TEXTS = confit.load_yaml(os.path.join(_common.RSRC, 'lyricstext.yaml'))
-
-try:
- googlekey = config['lyrics']['google_API_key'].get(unicode)
-except confit.NotFoundError:
- googlekey = None
-
-# default query for tests
-definfo = dict(artist=u'The Beatles', title=u'Lady Madonna')
-
-
-class MockFetchUrl(object):
- def __init__(self, pathval='fetched_path'):
- self.pathval = pathval
- self.fetched = None
-
- def __call__(self, url, filename=None):
- self.fetched = url
- url = url.replace('http://', '').replace('www.', '')
- fn = "".join(x for x in url if (x.isalnum() or x == '/'))
- fn = fn.split('/')
- fn = os.path.join('rsrc', 'lyrics', fn[0], fn[-1]) + '.txt'
- with open(fn, 'r') as f:
- content = f.read()
- return content
-
-
-def is_lyrics_content_ok(title, text):
- """Compare lyrics text to expected lyrics for given title"""
-
- setexpected = set(LYRICS_TEXTS[lyrics.slugify(title)].split())
- settext = set(text.split())
- setinter = setexpected.intersection(settext)
- # consider lyrics ok if they share 50% or more with the reference
- if len(setinter):
- ratio = 1.0 * max(len(setexpected), len(settext)) / len(setinter)
- return (ratio > .5 and ratio < 2)
- return False
-
-
-class LyricsPluginTest(unittest.TestCase):
- def setUp(self):
- """Set up configuration"""
- lyrics.LyricsPlugin()
-
- def test_default_ok(self):
- """Test each lyrics engine with the default query"""
-
- lyrics.fetch_url = MockFetchUrl()
-
- for f in (lyrics.fetch_lyricswiki, lyrics.fetch_lyricscom):
- res = f(definfo['artist'], definfo['title'])
- self.assertTrue(lyrics.is_lyrics(res))
- self.assertTrue(is_lyrics_content_ok(definfo['title'], res))
-
- def test_missing_lyrics(self):
- self.assertFalse(lyrics.is_lyrics(LYRICS_TEXTS['missing_texts']))
-
-
-class LyricsScrapingPluginTest(unittest.TestCase):
-
- # Every source entered in default beets google custom search engine
- # must be listed below.
- # Use default query when possible, or override artist and title field
- # if website don't have lyrics for default query.
- sourcesOk = [
- dict(definfo, url=u'http://www.smartlyrics.com',
- path=u'/Song18148-The-Beatles-Lady-Madonna-lyrics.aspx'),
- dict(definfo, url=u'http://www.elyricsworld.com',
- path=u'/lady_madonna_lyrics_beatles.html'),
- dict(artist=u'Beres Hammond', title=u'I could beat myself',
- url=u'http://www.reggaelyrics.info',
- path=u'/beres-hammond/i-could-beat-myself'),
- dict(definfo, artist=u'Lilly Wood & the prick', title=u"Hey it's ok",
- url=u'http://www.lyricsmania.com',
- path=u'/hey_its_ok_lyrics_lilly_wood_and_the_prick.html'),
- dict(definfo, artist=u'Lilly Wood & the prick', title=u"Hey it's ok",
- url=u'http://www.paroles.net/',
- path=u'lilly-wood-the-prick/paroles-hey-it-s-ok'),
- dict(definfo, artist=u'Amy Winehouse', title=u"Jazz'n'blues",
- url=u'http://www.lyricsontop.com',
- path=u'/amy-winehouse-songs/jazz-n-blues-lyrics.html'),
- dict(definfo, url=u'http://www.sweetslyrics.com',
- path=u'/761696.The%20Beatles%20-%20Lady%20Madonna.html'),
- dict(definfo, url=u'http://www.lyrics007.com',
- path=u'/The%20Beatles%20Lyrics/Lady%20Madonna%20Lyrics.html'),
- dict(definfo, url=u'http://www.absolutelyrics.com',
- path=u'/lyrics/view/the_beatles/lady_madonna'),
- dict(definfo, url=u'http://www.azlyrics.com/',
- path=u'/lyrics/beatles/ladymadonna.html'),
- dict(definfo, url=u'http://www.chartlyrics.com',
- path=u'/_LsLsZ7P4EK-F-LD4dJgDQ/Lady+Madonna.aspx'),
- dict(definfo, url='http://www.releaselyrics.com',
- path=u'/e35f/the-beatles-lady-madonna'),
- ]
-
- # Websites that can't be scraped yet and whose results must be
- # flagged as invalid lyrics.
- sourcesFail = [
- dict(definfo, url='http://www.songlyrics.com',
- path=u'/the-beatles/lady-madonna-lyrics'),
- dict(definfo, url='http://www.metrolyrics.com/',
- path='best-for-last-lyrics-adele.html')
- ]
-
- # Websites that return truncated lyrics because of scraping issues, and
- # thus should not be included as sources to Google CSE.
- # They are good candidates for later inclusion after improvement
- # iterations of the scraping algorithm.
- sourcesIncomplete = [
- dict(definfo, artist=u'Lilly Wood & the prick', title=u"Hey it's ok",
- url=u'http://www.lacoccinelle.net',
- path=u'/paroles-officielles/550512.html'),
- ]
-
- def test_sources_ok(self):
- for s in self.sourcesOk:
- url = s['url'] + s['path']
- res = lyrics.scrape_lyrics_from_url(url)
- self.assertTrue(lyrics.is_lyrics(res), url)
- self.assertTrue(is_lyrics_content_ok(s['title'], res), url)
-
- def test_sources_fail(self):
- for s in self.sourcesFail:
- url = s['url'] + s['path']
- res = lyrics.scrape_lyrics_from_url(url)
- # very unlikely these sources pass if the scraping algo is not
- # tweaked on purpose for these cases
- self.assertFalse(lyrics.is_lyrics(res), "%s => %s" % (url, res))
-
- def test_sources_incomplete(self):
- for s in self.sourcesIncomplete:
- url = s['url'] + s['path']
- res = lyrics.scrape_lyrics_from_url(url)
-
- self.assertTrue(lyrics.is_lyrics(res))
- # these sources may pass if the html source evolve or after
- # a random improvement in the scraping algo: we want to
- # be noticed if it's the case.
- if is_lyrics_content_ok(s['title'], res):
- log.debug('Source %s actually return valid lyrics!' % s['url'])
-
- def test_is_page_candidate(self):
- for s in self.sourcesOk:
- url = unicode(s['url'] + s['path'])
- html = lyrics.fetch_url(url)
- soup = BeautifulSoup(html)
- if not soup.title:
- continue
- self.assertEqual(lyrics.is_page_candidate(url, soup.title.string,
- s['title'], s['artist']),
- True, url)
-
-
-def suite():
- return unittest.TestLoader().loadTestsFromName(__name__)
-
-if __name__ == '__main__':
- unittest.main(defaultTest='suite')
diff --git a/test/rsrc/abbey-different.jpg b/test/rsrc/abbey-different.jpg
new file mode 100644
index 000000000..138c0e599
Binary files /dev/null and b/test/rsrc/abbey-different.jpg differ
diff --git a/test/rsrc/abbey-similar.jpg b/test/rsrc/abbey-similar.jpg
new file mode 100644
index 000000000..667cd4f0e
Binary files /dev/null and b/test/rsrc/abbey-similar.jpg differ
diff --git a/test/rsrc/abbey.jpg b/test/rsrc/abbey.jpg
new file mode 100644
index 000000000..5bb14c47d
Binary files /dev/null and b/test/rsrc/abbey.jpg differ
diff --git a/test/rsrc/lyrics/elyricsworldcom/ladymadonnalyricsbeatleshtml.txt b/test/rsrc/lyrics/elyricsworldcom/ladymadonnalyricsbeatleshtml.txt
index c0a12f6c1..fe5b7d09c 100644
--- a/test/rsrc/lyrics/elyricsworldcom/ladymadonnalyricsbeatleshtml.txt
+++ b/test/rsrc/lyrics/elyricsworldcom/ladymadonnalyricsbeatleshtml.txt
@@ -267,6 +267,10 @@
@@ -440,7 +440,6 @@ click: function(score, evt) {
@@ -450,14 +449,6 @@ click: function(score, evt) {
@@ -532,4 +523,4 @@ found at <a href='http://www.elyricsworld.com' target='_blank'>elyricsworl
-