This commit is contained in:
Fabrice Laporte 2013-02-09 17:46:06 +01:00
commit 6b2e409007
34 changed files with 1171 additions and 474 deletions

View file

@ -25,27 +25,25 @@ from beets.util import sorted_walk, ancestry, displayable_path
from .hooks import AlbumInfo, TrackInfo, AlbumMatch, TrackMatch
from .match import AutotagError
from .match import tag_item, tag_album
from .match import \
RECOMMEND_STRONG, RECOMMEND_MEDIUM, RECOMMEND_LOW, RECOMMEND_NONE
from .match import recommendation
# Global logger.
log = logging.getLogger('beets')
# Constants for directory walker.
MULTIDISC_MARKERS = (r'part', r'volume', r'vol\.', r'disc', r'cd')
MULTIDISC_PAT_FMT = r'%s\s*\d'
MULTIDISC_MARKERS = (r'disc', r'cd')
MULTIDISC_PAT_FMT = r'^(.*%s[\W_]*)\d'
# Additional utilities for the main interface.
def albums_in_dir(path):
"""Recursively searches the given directory and returns an iterable
of (path, items) where path is a containing directory and items is
of (paths, items) where paths is a list of directories and items is
a list of Items that is probably an album. Specifically, any folder
containing any media files is an album.
"""
collapse_root = None
collapse_items = None
collapse_pat = collapse_paths = collapse_items = None
for root, dirs, files in sorted_walk(path,
ignore=config['ignore'].as_str_seq()):
@ -63,46 +61,87 @@ def albums_in_dir(path):
else:
items.append(i)
# If we're collapsing, test to see whether we should continue to
# collapse. If so, just add to the collapsed item set;
# otherwise, end the collapse and continue as normal.
if collapse_root is not None:
if collapse_root in ancestry(root):
# If we're currently collapsing the constituent directories in a
# multi-disc album, check whether we should continue collapsing
# and add the current directory. If so, just add the directory
# and move on to the next directory. If not, stop collapsing.
if collapse_paths:
if (not collapse_pat and collapse_paths[0] in ancestry(root)) or \
(collapse_pat and
collapse_pat.match(os.path.basename(root))):
# Still collapsing.
collapse_paths.append(root)
collapse_items += items
continue
else:
# Collapse finished. Yield the collapsed directory and
# proceed to process the current one.
if collapse_items:
yield collapse_root, collapse_items
collapse_root = collapse_items = None
yield collapse_paths, collapse_items
collapse_pat = collapse_paths = collapse_items = None
# Does the current directory look like a multi-disc album? If
# so, begin collapsing here.
if dirs and not items: # Must be only directories.
multidisc = False
for marker in MULTIDISC_MARKERS:
pat = MULTIDISC_PAT_FMT % marker
if all(re.search(pat, dirname, re.I) for dirname in dirs):
multidisc = True
# Check whether this directory looks like the *first* directory
# in a multi-disc sequence. There are two indicators: the file
# is named like part of a multi-disc sequence (e.g., "Title Disc
# 1") or it contains no items but only directories that are
# named in this way.
start_collapsing = False
for marker in MULTIDISC_MARKERS:
marker_pat = re.compile(MULTIDISC_PAT_FMT % marker, re.I)
match = marker_pat.match(os.path.basename(root))
# Is this directory the root of a nested multi-disc album?
if dirs and not items:
# Check whether all subdirectories have the same prefix.
start_collapsing = True
subdir_pat = None
for subdir in dirs:
# The first directory dictates the pattern for
# the remaining directories.
if not subdir_pat:
match = marker_pat.match(subdir)
if match:
subdir_pat = re.compile(r'^%s\d' %
re.escape(match.group(1)), re.I)
else:
start_collapsing = False
break
# Subsequent directories must match the pattern.
elif not subdir_pat.match(subdir):
start_collapsing = False
break
# If all subdirectories match, don't check other
# markers.
if start_collapsing:
break
# This becomes True only when all directories match a
# pattern for a single marker.
if multidisc:
# Start collapsing; continue to the next iteration.
collapse_root = root
collapse_items = []
continue
# Is this directory the first in a flattened multi-disc album?
elif match:
start_collapsing = True
# Set the current pattern to match directories with the same
# prefix as this one, followed by a digit.
collapse_pat = re.compile(r'^%s\d' %
re.escape(match.group(1)), re.I)
break
# If either of the above heuristics indicated that this is the
# beginning of a multi-disc album, initialize the collapsed
# directory and item lists and check the next directory.
if start_collapsing:
# Start collapsing; continue to the next iteration.
collapse_paths = [root]
collapse_items = items
continue
# If it's nonempty, yield it.
if items:
yield root, items
yield [root], items
# Clear out any unfinished collapse.
if collapse_root is not None and collapse_items:
yield collapse_root, collapse_items
if collapse_paths and collapse_items:
yield collapse_paths, collapse_items
def apply_item_metadata(item, track_info):
"""Set an item's metadata from its matched TrackInfo object.
@ -139,12 +178,21 @@ def apply_metadata(album_info, mapping):
item.albumartist_credit = album_info.artist_credit
# Release date.
if album_info.year:
item.year = album_info.year
if album_info.month:
item.month = album_info.month
if album_info.day:
item.day = album_info.day
for prefix in '', 'original_':
if config['original_date'] and not prefix:
# Ignore specific release date.
continue
for suffix in 'year', 'month', 'day':
key = prefix + suffix
value = getattr(album_info, key)
if value:
setattr(item, key, value)
if config['original_date']:
# If we're using original release date for both
# fields, set item.year = info.original_year,
# etc.
setattr(item, suffix, value)
# Title.
item.title = track_info.title

View file

@ -60,7 +60,8 @@ class AlbumInfo(object):
label=None, mediums=None, artist_sort=None,
releasegroup_id=None, catalognum=None, script=None,
language=None, country=None, albumstatus=None, media=None,
albumdisambig=None, artist_credit=None):
albumdisambig=None, artist_credit=None, original_year=None,
original_month=None, original_day=None):
self.album = album
self.album_id = album_id
self.artist = artist
@ -84,6 +85,9 @@ class AlbumInfo(object):
self.media = media
self.albumdisambig = albumdisambig
self.artist_credit = artist_credit
self.original_year = original_year
self.original_month = original_month
self.original_day = original_day
# Work around a bug in python-musicbrainz-ngs that causes some
# strings to be bytes rather than Unicode.

View file

@ -25,6 +25,7 @@ from unidecode import unidecode
from beets import plugins
from beets import config
from beets.util import levenshtein, plurality
from beets.util.enumeration import enum
from beets.autotag import hooks
# Distance parameters.
@ -71,11 +72,8 @@ SD_REPLACE = [
(r'&', 'and'),
]
# Recommendation constants.
RECOMMEND_STRONG = 'RECOMMEND_STRONG'
RECOMMEND_MEDIUM = 'RECOMMEND_MEDIUM'
RECOMMEND_LOW = 'RECOMMEND_LOW'
RECOMMEND_NONE = 'RECOMMEND_NONE'
# Recommendation enumeration.
recommendation = enum('none', 'low', 'medium', 'strong', name='recommendation')
# Artist signals that indicate "various artists". These are used at the
# album level to determine whether a given release is likely a VA
@ -322,37 +320,68 @@ def match_by_id(items):
log.debug('No album ID consensus.')
return None
def recommendation(results):
def _recommendation(results):
"""Given a sorted list of AlbumMatch or TrackMatch objects, return a
recommendation flag (RECOMMEND_STRONG, RECOMMEND_MEDIUM,
RECOMMEND_NONE) based on the results' distances.
recommendation based on the results' distances.
If the recommendation is higher than the configured maximum for
certain situations, the recommendation will be downgraded to the
configured maximum.
"""
if not results:
# No candidates: no recommendation.
rec = RECOMMEND_NONE
return recommendation.none
# Basic distance thresholding.
min_dist = results[0].distance
if min_dist < config['match']['strong_rec_thresh'].as_number():
# Strong recommendation level.
rec = recommendation.strong
elif min_dist <= config['match']['medium_rec_thresh'].as_number():
# Medium recommendation level.
rec = recommendation.medium
elif len(results) == 1:
# Only a single candidate.
rec = recommendation.low
elif results[1].distance - min_dist >= \
config['match']['rec_gap_thresh'].as_number():
# Gap between first two candidates is large.
rec = recommendation.low
else:
min_dist = results[0].distance
if min_dist < config['match']['strong_rec_thresh'].as_number():
# Partial matches get downgraded to "medium".
if isinstance(results[0], hooks.AlbumMatch) and \
(results[0].extra_items or results[0].extra_tracks):
rec = RECOMMEND_MEDIUM
else:
# Strong recommendation level.
rec = RECOMMEND_STRONG
elif min_dist <= config['match']['medium_rec_thresh'].as_number():
# Medium recommendation level.
rec = RECOMMEND_MEDIUM
elif len(results) == 1:
# Only a single candidate.
rec = RECOMMEND_LOW
elif results[1].distance - min_dist >= \
config['match']['rec_gap_thresh'].as_number():
# Gap between first two candidates is large.
rec = RECOMMEND_LOW
else:
# No conclusion.
rec = RECOMMEND_NONE
# No conclusion.
rec = recommendation.none
# "Downgrades" in certain configured situations.
if isinstance(results[0], hooks.AlbumMatch):
# Load the configured recommendation maxima.
max_rec = {}
for trigger in 'partial', 'tracklength', 'tracknumber':
max_rec[trigger] = \
config['match']['max_rec'][trigger].as_choice({
'strong': recommendation.strong,
'medium': recommendation.medium,
'low': recommendation.low,
'none': recommendation.none,
})
# Partial match.
if rec > max_rec['partial'] and \
(results[0].extra_items or results[0].extra_tracks):
rec = max_rec['partial']
# Check track number and duration for each item.
for item, track_info in results[0].mapping.items():
# Track length differs.
if rec > max_rec['tracklength'] and \
item.length and track_info.length and \
abs(item.length - track_info.length) > TRACK_LENGTH_GRACE:
rec = max_rec['tracklength']
# Track number differs.
elif rec > max_rec['tracknumber'] and item.track not in \
(track_info.index, track_info.medium_index):
rec = max_rec['tracknumber']
return rec
def _add_candidate(items, results, info):
@ -386,10 +415,7 @@ def tag_album(items, search_artist=None, search_album=None,
- The current album.
- A list of AlbumMatch objects. The candidates are sorted by
distance (i.e., best match first).
- A recommendation, one of RECOMMEND_STRONG, RECOMMEND_MEDIUM,
or RECOMMEND_NONE; indicating that the first candidate is
very likely, it is somewhat likely, or no conclusion could
be reached.
- A recommendation.
If search_artist and search_album or search_id are provided, then
they are used as search terms in place of the current metadata.
May raise an AutotagError if existing metadata is insufficient.
@ -410,13 +436,13 @@ def tag_album(items, search_artist=None, search_album=None,
id_info = match_by_id(items)
if id_info:
_add_candidate(items, candidates, id_info)
rec = recommendation(candidates.values())
rec = _recommendation(candidates.values())
log.debug('Album ID match recommendation is ' + str(rec))
if candidates and not config['import']['timid']:
# If we have a very good MBID match, return immediately.
# Otherwise, this match will compete against metadata-based
# matches.
if rec == RECOMMEND_STRONG:
if rec == recommendation.strong:
log.debug('ID match.')
return cur_artist, cur_album, candidates.values(), rec
@ -425,7 +451,7 @@ def tag_album(items, search_artist=None, search_album=None,
if candidates:
return cur_artist, cur_album, candidates.values(), rec
else:
return cur_artist, cur_album, [], RECOMMEND_NONE
return cur_artist, cur_album, [], recommendation.none
# Search terms.
if not (search_artist and search_album):
@ -448,7 +474,7 @@ def tag_album(items, search_artist=None, search_album=None,
# Sort and get the recommendation.
candidates = sorted(candidates.itervalues())
rec = recommendation(candidates)
rec = _recommendation(candidates)
return cur_artist, cur_album, candidates, rec
def tag_item(item, search_artist=None, search_title=None,
@ -473,8 +499,8 @@ def tag_item(item, search_artist=None, search_title=None,
candidates[track_info.track_id] = \
hooks.TrackMatch(dist, track_info)
# If this is a good match, then don't keep searching.
rec = recommendation(candidates.values())
if rec == RECOMMEND_STRONG and not config['import']['timid']:
rec = _recommendation(candidates.values())
if rec == recommendation.strong and not config['import']['timid']:
log.debug('Track ID match.')
return candidates.values(), rec
@ -483,7 +509,7 @@ def tag_item(item, search_artist=None, search_title=None,
if candidates:
return candidates.values(), rec
else:
return [], RECOMMEND_NONE
return [], recommendation.none
# Search terms.
if not (search_artist and search_title):
@ -498,5 +524,5 @@ def tag_item(item, search_artist=None, search_title=None,
# Sort by distance and return with recommendation.
log.debug('Found %i candidates.' % len(candidates))
candidates = sorted(candidates.itervalues())
rec = recommendation(candidates)
rec = _recommendation(candidates)
return candidates, rec

View file

@ -135,9 +135,10 @@ def track_info(recording, index=None, medium=None, medium_index=None):
info.decode()
return info
def _set_date_str(info, date_str):
def _set_date_str(info, date_str, original=False):
"""Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo
object, set the object's release date fields appropriately.
object, set the object's release date fields appropriately. If
`original`, then set the original_year, etc., fields.
"""
if date_str:
date_parts = date_str.split('-')
@ -148,6 +149,9 @@ def _set_date_str(info, date_str):
date_num = int(date_part)
except ValueError:
continue
if original:
key = 'original_' + key
setattr(info, key, date_num)
def album_info(release):
@ -188,23 +192,31 @@ def album_info(release):
info.va = info.artist_id == VARIOUS_ARTISTS_ID
info.asin = release.get('asin')
info.releasegroup_id = release['release-group']['id']
info.albumdisambig = release['release-group'].get('disambiguation')
info.country = release.get('country')
info.albumstatus = release.get('status')
# Build up the disambiguation string from the release group and release.
disambig = []
if release['release-group'].get('disambiguation'):
disambig.append(release['release-group'].get('disambiguation'))
if release.get('disambiguation'):
disambig.append(release.get('disambiguation'))
info.albumdisambig = u', '.join(disambig)
# Release type not always populated.
if 'type' in release['release-group']:
reltype = release['release-group']['type']
if reltype:
info.albumtype = reltype.lower()
# Release date.
if 'first-release-date' in release['release-group']:
# Try earliest release date for the entire group first.
_set_date_str(info, release['release-group']['first-release-date'])
elif 'date' in release:
# Fall back to release-specific date.
_set_date_str(info, release['date'])
# Release dates.
release_date = release.get('date')
release_group_date = release['release-group'].get('first-release-date')
if not release_date:
# Fall back if release-specific date is not available.
release_date = release_group_date
_set_date_str(info, release_date, False)
_set_date_str(info, release_group_date, True)
# Label name.
if release.get('label-info-list'):

View file

@ -17,6 +17,7 @@ import:
singletons: no
default_action: apply
clutter: ["Thumbs.DB", ".DS_Store"]
ignore: [".*", "*~"]
replace:
'[\\/]': _
@ -25,7 +26,9 @@ replace:
'[<>:"\?\*\|]': _
'\.$': _
'\s+$': ''
path_sep_replace: _
art_filename: cover
max_filename_length: 0
plugins: []
pluginpath: []
@ -35,6 +38,11 @@ timeout: 5.0
per_disc_numbering: no
verbose: no
terminal_encoding: utf8
original_date: no
ui:
terminal_width: 80
length_diff_thresh: 10.0
list_format_item: $artist - $album - $title
list_format_album: $albumartist - $album
@ -55,3 +63,7 @@ match:
strong_rec_thresh: 0.04
medium_rec_thresh: 0.25
rec_gap_thresh: 0.25
max_rec:
partial: medium
tracklength: strong
tracknumber: strong

View file

@ -161,21 +161,21 @@ def _save_state(state):
# Utilities for reading and writing the beets progress file, which
# allows long tagging tasks to be resumed when they pause (or crash).
PROGRESS_KEY = 'tagprogress'
def progress_set(toppath, path):
def progress_set(toppath, paths):
"""Record that tagging for the given `toppath` was successful up to
`path`. If path is None, then clear the progress value (indicating
`paths`. If paths is None, then clear the progress value (indicating
that the tagging completed).
"""
state = _open_state()
if PROGRESS_KEY not in state:
state[PROGRESS_KEY] = {}
if path is None:
if paths is None:
# Remove progress from file.
if toppath in state[PROGRESS_KEY]:
del state[PROGRESS_KEY][toppath]
else:
state[PROGRESS_KEY][toppath] = path
state[PROGRESS_KEY][toppath] = paths
_save_state(state)
def progress_get(toppath):
@ -192,19 +192,19 @@ def progress_get(toppath):
# This keeps track of all directories that were ever imported, which
# allows the importer to only import new stuff.
HISTORY_KEY = 'taghistory'
def history_add(path):
"""Indicate that the import of `path` is completed and should not
be repeated in incremental imports.
def history_add(paths):
"""Indicate that the import of the album in `paths` is completed and
should not be repeated in incremental imports.
"""
state = _open_state()
if HISTORY_KEY not in state:
state[HISTORY_KEY] = set()
state[HISTORY_KEY].add(path)
state[HISTORY_KEY].add(tuple(paths))
_save_state(state)
def history_get():
"""Get the set of completed paths in incremental imports.
"""Get the set of completed path tuples in incremental imports.
"""
state = _open_state()
if HISTORY_KEY not in state:
@ -258,12 +258,13 @@ class ImportSession(object):
if not iconfig['copy']:
iconfig['delete'] = False
def tag_log(self, status, path):
def tag_log(self, status, paths):
"""Log a message about a given album to logfile. The status should
reflect the reason the album couldn't be tagged.
"""
if self.logfile:
print('{0} {1}'.format(status, path), file=self.logfile)
print(u'{0} {1}'.format(status, displayable_path(paths)),
file=self.logfile)
self.logfile.flush()
def log_choice(self, task, duplicate=False):
@ -271,21 +272,21 @@ class ImportSession(object):
``duplicate``, then this is a secondary choice after a duplicate was
detected and a decision was made.
"""
path = task.path if task.is_album else task.item.path
paths = task.paths if task.is_album else [task.item.path]
if duplicate:
# Duplicate: log all three choices (skip, keep both, and trump).
if task.remove_duplicates:
self.tag_log('duplicate-replace', path)
self.tag_log('duplicate-replace', paths)
elif task.choice_flag in (action.ASIS, action.APPLY):
self.tag_log('duplicate-keep', path)
self.tag_log('duplicate-keep', paths)
elif task.choice_flag is (action.SKIP):
self.tag_log('duplicate-skip', path)
self.tag_log('duplicate-skip', paths)
else:
# Non-duplicate: log "skip" and "asis" choices.
if task.choice_flag is action.ASIS:
self.tag_log('asis', path)
self.tag_log('asis', paths)
elif task.choice_flag is action.SKIP:
self.tag_log('skip', path)
self.tag_log('skip', paths)
def should_resume(self, path):
raise NotImplementedError
@ -347,9 +348,9 @@ class ImportTask(object):
"""Represents a single set of items to be imported along with its
intermediate state. May represent an album or a single item.
"""
def __init__(self, toppath=None, path=None, items=None):
def __init__(self, toppath=None, paths=None, items=None):
self.toppath = toppath
self.path = path
self.paths = paths
self.items = items
self.sentinel = False
self.remove_duplicates = False
@ -365,12 +366,12 @@ class ImportTask(object):
return obj
@classmethod
def progress_sentinel(cls, toppath, path):
def progress_sentinel(cls, toppath, paths):
"""Create a task indicating that a single directory in a larger
import has finished. This is only required for singleton
imports; progress is implied for album imports.
"""
obj = cls(toppath, path)
obj = cls(toppath, paths)
obj.sentinel = True
return obj
@ -431,19 +432,19 @@ class ImportTask(object):
"""Updates the progress state to indicate that this album has
finished.
"""
if self.sentinel and self.path is None:
if self.sentinel and self.paths is None:
# "Done" sentinel.
progress_set(self.toppath, None)
elif self.sentinel or self.is_album:
# "Directory progress" sentinel for singletons or a real
# album task, which implies the same.
progress_set(self.toppath, self.path)
progress_set(self.toppath, self.paths)
def save_history(self):
"""Save the directory in the history for incremental imports.
"""
if self.sentinel or self.is_album:
history_add(self.path)
if self.is_album and not self.sentinel:
history_add(self.paths)
# Logical decisions.
@ -512,7 +513,9 @@ class ImportTask(object):
call when the file in question may not have been removed.
"""
if self.toppath and not os.path.exists(filename):
util.prune_dirs(os.path.dirname(filename), self.toppath)
util.prune_dirs(os.path.dirname(filename),
self.toppath,
clutter=config['clutter'].get(list))
# Full-album pipeline stages.
@ -575,7 +578,7 @@ def read_tasks(session):
continue
# When incremental, skip paths in the history.
if config['import']['incremental'] and path in history_dirs:
if config['import']['incremental'] and tuple(path) in history_dirs:
log.debug(u'Skipping previously-imported path: %s' %
displayable_path(path))
incremental_skipped += 1
@ -613,7 +616,7 @@ def query_tasks(session):
log.debug('yielding album %i: %s - %s' %
(album.id, album.albumartist, album.album))
items = list(album.items())
yield ImportTask(None, album.item_dir(), items)
yield ImportTask(None, [album.item_dir()], items)
def initial_lookup(session):
"""A coroutine for performing the initial MusicBrainz lookup for an
@ -629,7 +632,7 @@ def initial_lookup(session):
plugins.send('import_task_start', session=session, task=task)
log.debug('Looking up: %s' % task.path)
log.debug('Looking up: %s' % displayable_path(task.paths))
try:
task.set_candidates(*autotag.tag_album(task.items,
config['import']['timid']))
@ -662,7 +665,7 @@ def user_query(session):
def emitter():
for item in task.items:
yield ImportTask.item_task(item)
yield ImportTask.progress_sentinel(task.toppath, task.path)
yield ImportTask.progress_sentinel(task.toppath, task.paths)
def collector():
while True:
item_task = yield
@ -695,7 +698,7 @@ def show_progress(session):
if task.sentinel:
continue
log.info(task.path)
log.info(displayable_path(task.paths))
# Behave as if ASIS were selected.
task.set_null_candidates()

View file

@ -32,6 +32,7 @@ from beets import util
from beets.util import bytestring_path, syspath, normpath, samefile,\
displayable_path
from beets.util.functemplate import Template
import beets
MAX_FILENAME_LENGTH = 200
@ -93,6 +94,9 @@ ITEM_FIELDS = [
('rg_track_peak', 'real', True, True),
('rg_album_gain', 'real', True, True),
('rg_album_peak', 'real', True, True),
('original_year', 'int', True, True),
('original_month', 'int', True, True),
('original_day', 'int', True, True),
('length', 'real', False, True),
('bitrate', 'int', False, True),
@ -139,6 +143,9 @@ ALBUM_FIELDS = [
('albumdisambig', 'text', True),
('rg_album_gain', 'real', True),
('rg_album_peak', 'real', True),
('original_year', 'int', True),
('original_month', 'int', True),
('original_day', 'int', True),
]
ALBUM_KEYS = [f[0] for f in ALBUM_FIELDS]
ALBUM_KEYS_ITEM = [f[0] for f in ALBUM_FIELDS if f[2]]
@ -183,6 +190,39 @@ def _regexp(expr, val):
return False
return res is not None
# Path element formatting for templating.
def format_for_path(value, key=None, pathmod=None):
    """Sanitize the value for inclusion in a path: replace separators
    with _, etc. Doesn't guarantee that the whole path will be valid;
    you should still call `util.sanitize_path` on the complete path.

    ``value`` is the raw template field value (string or number).
    ``key`` is the template field name; known numeric keys get special
    zero-padded or unit-suffixed formatting. ``pathmod`` is a path
    module (defaults to ``os.path``) whose ``sep``/``altsep`` determine
    which separator characters are replaced in string values.
    Returns a unicode string.
    """
    pathmod = pathmod or os.path
    if isinstance(value, basestring):
        # Strings: replace any path separator with the configured
        # substitute so the value cannot introduce extra directory
        # levels. altsep may be None (e.g. on POSIX), hence the check.
        for sep in (pathmod.sep, pathmod.altsep):
            if sep:
                value = value.replace(
                    sep,
                    beets.config['path_sep_replace'].get(unicode),
                )
    elif key in ('track', 'tracktotal', 'disc', 'disctotal'):
        # Pad indices with zeros.
        value = u'%02i' % (value or 0)
    elif key == 'year':
        value = u'%04i' % (value or 0)
    elif key in ('month', 'day'):
        value = u'%02i' % (value or 0)
    elif key == 'bitrate':
        # Bitrate gets formatted as kbps.
        value = u'%ikbps' % ((value or 0) // 1000)
    elif key == 'samplerate':
        # Sample rate formatted as kHz.
        value = u'%ikHz' % ((value or 0) // 1000)
    else:
        # Anything else: fall back to its unicode representation.
        value = unicode(value)
    return value
# Exceptions.
@ -361,7 +401,7 @@ class Item(object):
# From Item.
value = getattr(self, key)
if sanitize:
value = util.sanitize_for_path(value, pathmod, key)
value = format_for_path(value, key, pathmod)
mapping[key] = value
# Additional fields in non-sanitized case.
@ -378,7 +418,7 @@ class Item(object):
# Get values from plugins.
for key, value in plugins.template_values(self).iteritems():
if sanitize:
value = util.sanitize_for_path(value, pathmod, key)
value = format_for_path(value, key, pathmod)
mapping[key] = value
# Get template functions.
@ -1118,6 +1158,7 @@ class Library(BaseLibrary):
"""
pathmod = pathmod or os.path
platform = platform or sys.platform
basedir = basedir or self.directory
# Use a path format based on a query, falling back on the
# default.
@ -1163,12 +1204,15 @@ class Library(BaseLibrary):
subpath += extension.lower()
# Truncate too-long components.
subpath = util.truncate_path(subpath, pathmod)
maxlen = beets.config['max_filename_length'].get(int)
if not maxlen:
# When zero, try to determine from filesystem.
maxlen = util.max_filename_length(self.directory)
subpath = util.truncate_path(subpath, pathmod, maxlen)
if fragment:
return subpath
else:
basedir = basedir or self.directory
return normpath(os.path.join(basedir, subpath))
@ -1568,7 +1612,7 @@ class Album(BaseAlbum):
if not isinstance(self._library.art_filename,Template):
self._library.art_filename = Template(self._library.art_filename)
subpath = util.sanitize_path(util.sanitize_for_path(
subpath = util.sanitize_path(format_for_path(
self.evaluate_template(self._library.art_filename)
))
subpath = bytestring_path(subpath)
@ -1764,8 +1808,8 @@ class DefaultTemplateFunctions(object):
return res
# Flatten disambiguation value into a string.
disam_value = util.sanitize_for_path(getattr(album, disambiguator),
self.pathmod, disambiguator)
disam_value = format_for_path(getattr(album, disambiguator),
disambiguator, self.pathmod)
res = u' [{0}]'.format(disam_value)
self.lib._memotable[memokey] = res
return res

View file

@ -38,6 +38,7 @@ import mutagen.asf
import datetime
import re
import base64
import math
import struct
import imghdr
import os
@ -182,12 +183,69 @@ def _pack_asf_image(mime, data, type=3, description=""):
return tag_data
# iTunes Sound Check encoding.
def _sc_decode(soundcheck):
"""Convert a Sound Check string value to a (gain, peak) tuple as
used by ReplayGain.
"""
# SoundCheck tags consist of 10 numbers, each represented by 8
# characters of ASCII hex preceded by a space.
try:
soundcheck = soundcheck.replace(' ', '').decode('hex')
soundcheck = struct.unpack('!iiiiiiiiii', soundcheck)
except struct.error:
# SoundCheck isn't in the format we expect, so return default
# values.
return 0.0, 0.0
# SoundCheck stores absolute calculated/measured RMS value in an
# unknown unit. We need to find the ratio of this measurement
# compared to a reference value of 1000 to get our gain in dB. We
# play it safe by using the larger of the two values (i.e., the most
# attenuation).
gain = math.log10((max(*soundcheck[:2]) or 1000) / 1000.0) * -10
# SoundCheck stores peak values as the actual value of the sample,
# and again separately for the left and right channels. We need to
# convert this to a percentage of full scale, which is 32768 for a
# 16 bit sample. Once again, we play it safe by using the larger of
# the two values.
peak = max(soundcheck[6:8]) / 32768.0
return round(gain, 2), round(peak, 6)
def _sc_encode(gain, peak):
    """Encode ReplayGain gain/peak values as a Sound Check string.

    ``gain`` is a dB value and ``peak`` a fraction of full scale, as
    used by ReplayGain. Returns the 10-field space-separated ASCII-hex
    string iTunes stores.
    """
    # SoundCheck stores the peak value as the actual value of the
    # sample, rather than the percentage of full scale that RG uses, so
    # we do a simple conversion assuming 16 bit samples.
    peak *= 32768.0

    # SoundCheck stores absolute RMS values in some unknown units rather
    # than the dB values RG uses. We can calculate these absolute values
    # from the gain ratio using a reference value of 1000 units. We also
    # enforce the maximum value here, which is equivalent to about
    # -18.2dB.
    g1 = min(round((10 ** (gain / -10)) * 1000), 65534)
    # Same as above, except our reference level is 2500 units.
    g2 = min(round((10 ** (gain / -10)) * 2500), 65534)

    # The purpose of these values is unknown, but they also seem to be
    # unused so we just use zero.
    uk = 0

    # NOTE(review): g1, g2, and peak are floats here (round() returns a
    # float on Python 2), so %X depends on Python 2's implicit
    # float-to-int conversion — confirm before porting to Python 3,
    # where formatting a float with %X raises TypeError.
    values = (g1, g1, g2, g2, uk, uk, peak, peak, uk, uk)
    return (u' %08X' * 10) % values
# Flags for encoding field behavior.
# Determine style of packing, if any.
packing = enum('SLASHED', # pair delimited by /
'TUPLE', # a python tuple of 2 items
'DATE', # YYYY-MM-DD
packing = enum('SLASHED', # pair delimited by /
'TUPLE', # a python tuple of 2 items
'DATE', # YYYY-MM-DD
'SC', # Sound Check gain/peak encoding
name='packing')
class StorageStyle(object):
@ -203,19 +261,38 @@ class StorageStyle(object):
None. (Makes as_type irrelevant).
- pack_pos: If the value is packed, in which position it is
stored.
- ID3 storage only: match against this 'desc' field as well
as the key.
- suffix: When `as_type` is a string type, append this before
storing the value.
- float_places: When the value is a floating-point number and
encoded as a string, the number of digits to store after the
point.
For MP3 only:
- id3_desc: match against this 'desc' field as well
as the key.
- id3_frame_field: store the data in this field of the frame
object.
- id3_lang: set the language field of the frame object.
"""
def __init__(self, key, list_elem = True, as_type = unicode,
packing = None, pack_pos = 0, id3_desc = None,
id3_frame_field = 'text'):
def __init__(self, key, list_elem=True, as_type=unicode,
packing=None, pack_pos=0, pack_type=int,
id3_desc=None, id3_frame_field='text',
id3_lang=None, suffix=None, float_places=2):
self.key = key
self.list_elem = list_elem
self.as_type = as_type
self.packing = packing
self.pack_pos = pack_pos
self.pack_type = pack_type
self.id3_desc = id3_desc
self.id3_frame_field = id3_frame_field
self.id3_lang = id3_lang
self.suffix = suffix
self.float_places = float_places
# Convert suffix to correct string type.
if self.suffix and self.as_type in (str, unicode):
self.suffix = self.as_type(self.suffix)
# Dealing with packings.
@ -228,7 +305,7 @@ class Packed(object):
"""Create a Packed object for subscripting the packed values in
items. The items are packed using packstyle, which is a value
from the packing enum. none_val is returned from a request when
no suitable value is found in the items. Vales are converted to
no suitable value is found in the items. Values are converted to
out_type before they are returned.
"""
self.items = items
@ -256,6 +333,8 @@ class Packed(object):
seq = unicode(items).split('-')
elif self.packstyle == packing.TUPLE:
seq = items # tuple: items is already indexable
elif self.packstyle == packing.SC:
seq = _sc_decode(items)
try:
out = seq[index]
@ -268,8 +347,8 @@ class Packed(object):
return _safe_cast(self.out_type, out)
def __setitem__(self, index, value):
if self.packstyle in (packing.SLASHED, packing.TUPLE):
# SLASHED and TUPLE are always two-item packings
if self.packstyle in (packing.SLASHED, packing.TUPLE, packing.SC):
# SLASHED, TUPLE and SC are always two-item packings
length = 2
else:
# DATE can have up to three fields
@ -302,6 +381,8 @@ class Packed(object):
self.items = '-'.join(elems)
elif self.packstyle == packing.TUPLE:
self.items = new_items
elif self.packstyle == packing.SC:
self.items = _sc_encode(*new_items)
# The field itself.
@ -312,7 +393,7 @@ class MediaField(object):
can be unicode, int, or bool. id3, mp4, and flac are StorageStyle
instances parameterizing the field's storage for each type.
"""
def __init__(self, out_type = unicode, **kwargs):
def __init__(self, out_type=unicode, **kwargs):
"""Creates a new MediaField.
- out_type: The field's semantic (exterior) type.
- kwargs: A hash whose keys are 'mp3', 'mp4', 'asf', and 'etc'
@ -397,12 +478,14 @@ class MediaField(object):
# need to make a new frame?
if not found:
assert isinstance(style.id3_frame_field, str) # Keyword.
frame = mutagen.id3.Frames[style.key](
encoding=3,
desc=style.id3_desc,
**{style.id3_frame_field: val}
)
obj.mgfile.tags.add(frame)
args = {
'encoding': 3,
'desc': style.id3_desc,
style.id3_frame_field: val,
}
if style.id3_lang:
args['lang'] = style.id3_lang
obj.mgfile.tags.add(mutagen.id3.Frames[style.key](**args))
# Try to match on "owner" field.
elif style.key.startswith('UFID:'):
@ -458,7 +541,13 @@ class MediaField(object):
break
if style.packing:
out = Packed(out, style.packing)[style.pack_pos]
p = Packed(out, style.packing, out_type=style.pack_type)
out = p[style.pack_pos]
# Remove suffix.
if style.suffix and isinstance(out, (str, unicode)):
if out.endswith(style.suffix):
out = out[:len(style.suffix)]
# MPEG-4 freeform frames are (should be?) encoded as UTF-8.
if obj.type == 'mp4' and style.key.startswith('----:') and \
@ -478,17 +567,20 @@ class MediaField(object):
for style in styles:
if style.packing:
p = Packed(self._fetchdata(obj, style), style.packing)
p = Packed(self._fetchdata(obj, style), style.packing,
out_type=style.pack_type)
p[style.pack_pos] = val
out = p.items
else: # unicode, integer, or boolean scalar
else: # Unicode, integer, boolean, or float scalar.
out = val
# deal with Nones according to abstract type if present
if out is None:
if self.out_type == int:
out = 0
elif self.out_type == float:
out = 0.0
elif self.out_type == bool:
out = False
elif self.out_type == unicode:
@ -497,12 +589,16 @@ class MediaField(object):
# Convert to correct storage type (irrelevant for
# packed values).
if style.as_type == unicode:
if self.out_type == float and style.as_type in (str, unicode):
# Special case for float-valued data.
out = u'{0:.{1}f}'.format(out, style.float_places)
out = style.as_type(out)
elif style.as_type == unicode:
if out is None:
out = u''
else:
if self.out_type == bool:
# store bools as 1,0 instead of True,False
# Store bools as 1/0 instead of True/False.
out = unicode(int(bool(out)))
elif isinstance(out, str):
out = out.decode('utf8', 'ignore')
@ -516,6 +612,10 @@ class MediaField(object):
elif style.as_type in (bool, str):
out = style.as_type(out)
# Add a suffix to string storage.
if style.as_type in (str, unicode) and style.suffix:
out += style.suffix
# MPEG-4 "freeform" (----) frames must be encoded as UTF-8
# byte strings.
if obj.type == 'mp4' and style.key.startswith('----:') and \
@ -724,30 +824,6 @@ class ImageField(object):
base64.b64encode(pic.write())
]
class FloatValueField(MediaField):
"""A field that stores a floating-point number as a string."""
def __init__(self, places=2, suffix=None, **kwargs):
"""Make a field that stores ``places`` digits after the decimal
point and appends ``suffix`` (if specified) when encoding as a
string.
"""
super(FloatValueField, self).__init__(unicode, **kwargs)
fmt = ['%.', str(places), 'f']
if suffix:
fmt += [' ', suffix]
self.fmt = ''.join(fmt)
def __get__(self, obj, owner):
valstr = super(FloatValueField, self).__get__(obj, owner)
return _safe_cast(float, valstr)
def __set__(self, obj, val):
if not val:
val = 0.0
valstr = self.fmt % val
super(FloatValueField, self).__set__(obj, valstr)
# The file (a collection of fields).
@ -852,26 +928,6 @@ class MediaFile(object):
etc = StorageStyle('GROUPING'),
asf = StorageStyle('WM/ContentGroupDescription'),
)
year = MediaField(out_type=int,
mp3 = StorageStyle('TDRC', packing=packing.DATE, pack_pos=0),
mp4 = StorageStyle("\xa9day", packing=packing.DATE, pack_pos=0),
etc = [StorageStyle('DATE', packing=packing.DATE, pack_pos=0),
StorageStyle('YEAR')],
asf = StorageStyle('WM/Year', packing=packing.DATE, pack_pos=0),
)
month = MediaField(out_type=int,
mp3 = StorageStyle('TDRC', packing=packing.DATE, pack_pos=1),
mp4 = StorageStyle("\xa9day", packing=packing.DATE, pack_pos=1),
etc = StorageStyle('DATE', packing=packing.DATE, pack_pos=1),
asf = StorageStyle('WM/Year', packing=packing.DATE, pack_pos=1),
)
day = MediaField(out_type=int,
mp3 = StorageStyle('TDRC', packing=packing.DATE, pack_pos=2),
mp4 = StorageStyle("\xa9day", packing=packing.DATE, pack_pos=2),
etc = StorageStyle('DATE', packing=packing.DATE, pack_pos=2),
asf = StorageStyle('WM/Year', packing=packing.DATE, pack_pos=2),
)
date = CompositeDateField(year, month, day)
track = MediaField(out_type=int,
mp3 = StorageStyle('TRCK', packing=packing.SLASHED, pack_pos=0),
mp4 = StorageStyle('trkn', packing=packing.TUPLE, pack_pos=0),
@ -1025,6 +1081,56 @@ class MediaFile(object):
asf = StorageStyle('MusicBrainz/Album Comment'),
)
# Release date.
year = MediaField(out_type=int,
mp3 = StorageStyle('TDRC', packing=packing.DATE, pack_pos=0),
mp4 = StorageStyle("\xa9day", packing=packing.DATE, pack_pos=0),
etc = [StorageStyle('DATE', packing=packing.DATE, pack_pos=0),
StorageStyle('YEAR')],
asf = StorageStyle('WM/Year', packing=packing.DATE, pack_pos=0),
)
month = MediaField(out_type=int,
mp3 = StorageStyle('TDRC', packing=packing.DATE, pack_pos=1),
mp4 = StorageStyle("\xa9day", packing=packing.DATE, pack_pos=1),
etc = StorageStyle('DATE', packing=packing.DATE, pack_pos=1),
asf = StorageStyle('WM/Year', packing=packing.DATE, pack_pos=1),
)
day = MediaField(out_type=int,
mp3 = StorageStyle('TDRC', packing=packing.DATE, pack_pos=2),
mp4 = StorageStyle("\xa9day", packing=packing.DATE, pack_pos=2),
etc = StorageStyle('DATE', packing=packing.DATE, pack_pos=2),
asf = StorageStyle('WM/Year', packing=packing.DATE, pack_pos=2),
)
date = CompositeDateField(year, month, day)
# *Original* release date.
original_year = MediaField(out_type=int,
mp3 = StorageStyle('TDOR', packing=packing.DATE, pack_pos=0),
mp4 = StorageStyle('----:com.apple.iTunes:ORIGINAL YEAR',
packing=packing.DATE, pack_pos=0),
etc = StorageStyle('ORIGINALDATE', packing=packing.DATE, pack_pos=0),
asf = StorageStyle('WM/OriginalReleaseYear', packing=packing.DATE,
pack_pos=0),
)
original_month = MediaField(out_type=int,
mp3 = StorageStyle('TDOR', packing=packing.DATE, pack_pos=1),
mp4 = StorageStyle('----:com.apple.iTunes:ORIGINAL YEAR',
packing=packing.DATE, pack_pos=1),
etc = StorageStyle('ORIGINALDATE', packing=packing.DATE, pack_pos=1),
asf = StorageStyle('WM/OriginalReleaseYear', packing=packing.DATE,
pack_pos=1),
)
original_day = MediaField(out_type=int,
mp3 = StorageStyle('TDOR', packing=packing.DATE, pack_pos=2),
mp4 = StorageStyle('----:com.apple.iTunes:ORIGINAL YEAR',
packing=packing.DATE, pack_pos=2),
etc = StorageStyle('ORIGINALDATE', packing=packing.DATE, pack_pos=2),
asf = StorageStyle('WM/OriginalReleaseYear', packing=packing.DATE,
pack_pos=2),
)
original_date = CompositeDateField(original_year, original_month,
original_day)
# Nonstandard metadata.
artist_credit = MediaField(
mp3 = StorageStyle('TXXX', id3_desc=u'Artist Credit'),
@ -1102,29 +1208,53 @@ class MediaFile(object):
)
# ReplayGain fields.
rg_track_gain = FloatValueField(2, 'dB',
mp3 = StorageStyle('TXXX', id3_desc=u'REPLAYGAIN_TRACK_GAIN'),
mp4 = None,
etc = StorageStyle(u'REPLAYGAIN_TRACK_GAIN'),
asf = StorageStyle(u'replaygain_track_gain'),
rg_track_gain = MediaField(out_type=float,
mp3 = [StorageStyle('TXXX', id3_desc=u'REPLAYGAIN_TRACK_GAIN',
float_places=2, suffix=u' dB'),
StorageStyle('COMM', id3_desc=u'iTunNORM', id3_lang='eng',
packing=packing.SC, pack_pos=0, pack_type=float)],
mp4 = [StorageStyle('----:com.apple.iTunes:replaygain_track_gain',
as_type=str, float_places=2, suffix=b' dB'),
StorageStyle('----:com.apple.iTunes:iTunNORM',
packing=packing.SC, pack_pos=0, pack_type=float)],
etc = StorageStyle(u'REPLAYGAIN_TRACK_GAIN',
float_places=2, suffix=u' dB'),
asf = StorageStyle(u'replaygain_track_gain',
float_places=2, suffix=u' dB'),
)
rg_album_gain = FloatValueField(2, 'dB',
mp3 = StorageStyle('TXXX', id3_desc=u'REPLAYGAIN_ALBUM_GAIN'),
mp4 = None,
etc = StorageStyle(u'REPLAYGAIN_ALBUM_GAIN'),
asf = StorageStyle(u'replaygain_album_gain'),
rg_album_gain = MediaField(out_type=float,
mp3 = StorageStyle('TXXX', id3_desc=u'REPLAYGAIN_ALBUM_GAIN',
float_places=2, suffix=u' dB'),
mp4 = StorageStyle('----:com.apple.iTunes:replaygain_album_gain',
as_type=str, float_places=2, suffix=b' dB'),
etc = StorageStyle(u'REPLAYGAIN_ALBUM_GAIN',
float_places=2, suffix=u' dB'),
asf = StorageStyle(u'replaygain_album_gain',
float_places=2, suffix=u' dB'),
)
rg_track_peak = FloatValueField(6, None,
mp3 = StorageStyle('TXXX', id3_desc=u'REPLAYGAIN_TRACK_PEAK'),
mp4 = None,
etc = StorageStyle(u'REPLAYGAIN_TRACK_PEAK'),
asf = StorageStyle(u'replaygain_track_peak'),
rg_track_peak = MediaField(out_type=float,
mp3 = [StorageStyle('TXXX', id3_desc=u'REPLAYGAIN_TRACK_PEAK',
float_places=6),
StorageStyle('COMM', id3_desc=u'iTunNORM', id3_lang='eng',
packing=packing.SC, pack_pos=1, pack_type=float)],
mp4 = [StorageStyle('----:com.apple.iTunes:replaygain_track_peak',
as_type=str, float_places=6),
StorageStyle('----:com.apple.iTunes:iTunNORM',
packing=packing.SC, pack_pos=1, pack_type=float)],
etc = StorageStyle(u'REPLAYGAIN_TRACK_PEAK',
float_places=6),
asf = StorageStyle(u'replaygain_track_peak',
float_places=6),
)
rg_album_peak = FloatValueField(6, None,
mp3 = StorageStyle('TXXX', id3_desc=u'REPLAYGAIN_ALBUM_PEAK'),
mp4 = None,
etc = StorageStyle(u'REPLAYGAIN_ALBUM_PEAK'),
asf = StorageStyle(u'replaygain_album_peak'),
rg_album_peak = MediaField(out_type=float,
mp3 = StorageStyle('TXXX', id3_desc=u'REPLAYGAIN_ALBUM_PEAK',
float_places=6),
mp4 = StorageStyle('----:com.apple.iTunes:replaygain_album_peak',
as_type=str, float_places=6),
etc = StorageStyle(u'REPLAYGAIN_ALBUM_PEAK',
float_places=6),
asf = StorageStyle(u'replaygain_album_peak',
float_places=6),
)
@property

View file

@ -44,8 +44,10 @@ class BeetsPlugin(object):
self.import_stages = []
self.name = name or self.__module__.split('.')[-1]
self.config = beets.config[self.name]
self.template_funcs = {}
self.template_fields = {}
if not self.template_funcs:
self.template_funcs = {}
if not self.template_fields:
self.template_fields = {}
def commands(self):
"""Should return a list of beets.ui.Subcommand objects for
@ -153,7 +155,7 @@ class BeetsPlugin(object):
return func
return helper
_classes = set()
_classes = []
def load_plugins(names=()):
"""Imports the modules for a sequence of plugin names. Each name
must be the name of a Python module under the "beetsplug" namespace
@ -175,7 +177,7 @@ def load_plugins(names=()):
for obj in getattr(namespace, name).__dict__.values():
if isinstance(obj, type) and issubclass(obj, BeetsPlugin) \
and obj != BeetsPlugin:
_classes.add(obj)
_classes.append(obj)
except:
log.warn('** error loading plugin %s' % name)

View file

@ -27,6 +27,7 @@ import logging
import sqlite3
import errno
import re
import struct
from beets import library
from beets import plugins
@ -133,10 +134,9 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None,
a particular shortcut is desired; in that case, only that letter
should be capitalized.
By default, the first option is the default. If `require` is
provided, then there is no default. `default` can be provided to
override this. The prompt and fallback prompt are also inferred but
can be overridden.
By default, the first option is the default. `default` can be provided to
override this. If `require` is provided, then there is no default. The
prompt and fallback prompt are also inferred but can be overridden.
If numrange is provided, it is a pair of `(high, low)` (both ints)
indicating that, in addition to `options`, the user may enter an
@ -172,9 +172,9 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None,
index = option.index(found_letter)
# Mark the option's shortcut letter for display.
if (not require and default is None and not numrange and first) \
or (isinstance(default, basestring) and
found_letter.lower() == default.lower()):
if not require and ((default is None and not numrange and first) or
(isinstance(default, basestring) and
found_letter.lower() == default.lower())):
# The first option is the default; mark it.
show_letter = '[%s]' % found_letter.upper()
is_default = True
@ -195,10 +195,10 @@ def input_options(options, require=False, prompt=None, fallback_prompt=None,
first = False
# The default is just the first option if unspecified.
if default is None:
if require:
default = None
elif numrange:
if require:
default = None
elif default is None:
if numrange:
default = numrange[0]
else:
default = display_letters[0].lower()
@ -413,6 +413,29 @@ def colordiff(a, b, highlight='red'):
else:
return unicode(a), unicode(b)
def color_diff_suffix(a, b, highlight='red'):
"""Colorize the differing suffix between two strings."""
a, b = unicode(a), unicode(b)
if not config['color']:
return a, b
# Fast path.
if a == b:
return a, b
# Find the longest common prefix.
first_diff = None
for i in range(min(len(a), len(b))):
if a[i] != b[i]:
first_diff = i
break
else:
first_diff = min(len(a), len(b))
# Colorize from the first difference on.
return a[:first_diff] + colorize(highlight, a[first_diff:]), \
b[:first_diff] + colorize(highlight, b[first_diff:])
def get_path_formats():
"""Get the configuration's path formats as a list of query/template
pairs.
@ -477,6 +500,28 @@ def print_obj(obj, lib, fmt=None):
else:
print_(obj.evaluate_template(template, lib=lib))
def term_width():
"""Get the width (columns) of the terminal."""
fallback = config['ui']['terminal_width'].get(int)
# The fcntl and termios modules are not available on non-Unix
# platforms, so we fall back to a constant.
try:
import fcntl
import termios
except ImportError:
return fallback
try:
buf = fcntl.ioctl(0, termios.TIOCGWINSZ, ' '*4)
except IOError:
return fallback
try:
height, width = struct.unpack('hh', buf)
except struct.error:
return fallback
return width
# Subcommand parsing infrastructure.

View file

@ -22,11 +22,13 @@ import os
import time
import itertools
import re
import codecs
import beets
from beets import ui
from beets.ui import print_, input_, decargs
from beets import autotag
from beets.autotag import recommendation
from beets import plugins
from beets import importer
from beets.util import syspath, normpath, ancestry, displayable_path
@ -181,47 +183,60 @@ def show_change(cur_artist, cur_album, match):
# Tracks.
pairs = match.mapping.items()
pairs.sort(key=lambda (_, track_info): track_info.index)
# Build up LHS and RHS for track difference display. The `lines`
# list contains ``(current title, new title, width)`` tuples where
# `width` is the length (in characters) of the uncolorized LHS.
lines = []
for item, track_info in pairs:
# Get displayable LHS and RHS values.
cur_track = unicode(item.track)
new_track = format_index(track_info)
tracks_differ = item.track not in (track_info.index,
track_info.medium_index)
cur_title = item.title
# Titles.
new_title = track_info.title
if item.length and track_info.length:
cur_length = ui.colorize('red',
ui.human_seconds_short(item.length))
new_length = ui.colorize('red',
ui.human_seconds_short(track_info.length))
# Colorize changes.
cur_title, new_title = ui.colordiff(cur_title, new_title)
cur_track = ui.colorize('red', cur_track)
new_track = ui.colorize('red', new_track)
# Show filename (non-colorized) when title is not set.
if not item.title.strip():
# If there's no title, we use the filename.
cur_title = displayable_path(os.path.basename(item.path))
if cur_title != new_title:
lhs, rhs = cur_title, new_title
if tracks_differ:
lhs += u' (%s)' % cur_track
rhs += u' (%s)' % new_track
print_(u" * %s ->\n %s" % (lhs, rhs))
else:
line = u' * %s' % item.title
display = False
if tracks_differ:
display = True
line += u' (%s -> %s)' % (cur_track, new_track)
if item.length and track_info.length and \
abs(item.length - track_info.length) > 2.0:
display = True
line += u' (%s vs. %s)' % (cur_length, new_length)
if display:
print_(line)
cur_title = item.title.strip()
lhs, rhs = ui.colordiff(cur_title, new_title)
lhs_width = len(cur_title)
# Track number change.
if item.track not in (track_info.index, track_info.medium_index):
cur_track, new_track = unicode(item.track), format_index(track_info)
lhs_track, rhs_track = ui.color_diff_suffix(cur_track, new_track)
templ = ui.colorize('red', u' (#') + u'{0}' + \
ui.colorize('red', u')')
lhs += templ.format(lhs_track)
rhs += templ.format(rhs_track)
lhs_width += len(cur_track) + 4
# Length change.
if item.length and track_info.length and \
abs(item.length - track_info.length) > \
config['ui']['length_diff_thresh'].as_number():
cur_length = ui.human_seconds_short(item.length)
new_length = ui.human_seconds_short(track_info.length)
lhs_length, rhs_length = ui.color_diff_suffix(cur_length,
new_length)
templ = ui.colorize('red', u' (') + u'{0}' + \
ui.colorize('red', u')')
lhs += templ.format(lhs_length)
rhs += templ.format(rhs_length)
lhs_width += len(cur_length) + 3
if lhs != rhs:
lines.append((lhs, rhs, lhs_width))
# Print each track in two columns, or across two lines.
col_width = (ui.term_width() - len(''.join([' * ', ' -> ']))) // 2
if lines:
max_width = max(w for _, _, w in lines)
for lhs, rhs, lhs_width in lines:
if max_width > col_width:
print_(u' * %s ->\n %s' % (lhs, rhs))
else:
pad = max_width - lhs_width
print_(u' * %s%s -> %s' % (lhs, ' ' * pad, rhs))
# Missing and unmatched tracks.
for track_info in match.extra_tracks:
@ -263,7 +278,7 @@ def _summary_judment(rec):
made.
"""
if config['import']['quiet']:
if rec == autotag.RECOMMEND_STRONG:
if rec == recommendation.strong:
return importer.action.APPLY
else:
action = config['import']['quiet_fallback'].as_choice({
@ -271,7 +286,7 @@ def _summary_judment(rec):
'asis': importer.action.ASIS,
})
elif rec == autotag.RECOMMEND_NONE:
elif rec == recommendation.none:
action = config['import']['none_rec_action'].as_choice({
'skip': importer.action.SKIP,
'asis': importer.action.ASIS,
@ -338,13 +353,13 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None,
# Is the change good enough?
bypass_candidates = False
if rec != autotag.RECOMMEND_NONE:
if rec != recommendation.none:
match = candidates[0]
bypass_candidates = True
while True:
# Display and choose from candidates.
require = rec in (autotag.RECOMMEND_NONE, autotag.RECOMMEND_LOW)
require = rec <= recommendation.low
if not bypass_candidates:
# Display list of candidates.
@ -371,7 +386,13 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None,
if match.info.year:
disambig.append(unicode(match.info.year))
if match.info.media:
disambig.append(match.info.media)
if match.info.mediums > 1:
disambig.append(u'{0}x{1}'.format(
match.info.mediums, match.info.media))
else:
disambig.append(match.info.media)
if match.info.albumdisambig:
disambig.append(match.info.albumdisambig)
if disambig:
line += u' [{0}]'.format(u', '.join(disambig))
@ -421,7 +442,7 @@ def choose_candidate(candidates, singleton, rec, cur_artist=None,
show_change(cur_artist, cur_album, match)
# Exact match => tag automatically if we're not in timid mode.
if rec == autotag.RECOMMEND_STRONG and not config['import']['timid']:
if rec == recommendation.strong and not config['import']['timid']:
return match
# Ask for confirmation.
@ -492,7 +513,7 @@ class TerminalImportSession(importer.ImportSession):
"""
# Show what we're tagging.
print_()
print_(task.path)
print_(displayable_path(task.paths, u'\n'))
# Take immediate action if appropriate.
action = _summary_judment(task.rec)
@ -635,11 +656,11 @@ def import_files(lib, paths, query):
if config['import']['log'].get() is not None:
logpath = config['import']['log'].as_filename()
try:
logfile = open(syspath(logpath), 'a')
logfile = codecs.open(syspath(logpath), 'a', 'utf8')
except IOError:
raise ui.UserError(u"could not open log file for writing: %s" %
displayable_path(logpath))
print('import started', time.asctime(), file=logfile)
print(u'import started', time.asctime(), file=logfile)
else:
logfile = None
@ -654,7 +675,7 @@ def import_files(lib, paths, query):
finally:
# If we were logging, close the file.
if logfile:
print('', file=logfile)
print(u'', file=logfile)
logfile.close()
# Emit event.

View file

@ -140,9 +140,9 @@ def ancestry(path, pathmod=None):
return out
def sorted_walk(path, ignore=()):
"""Like ``os.walk``, but yields things in sorted, breadth-first
order. Directory and file names matching any glob pattern in
``ignore`` are skipped.
"""Like ``os.walk``, but yields things in case-insensitive sorted,
breadth-first order. Directory and file names matching any glob
pattern in ``ignore`` are skipped.
"""
# Make sure the path isn't a Unicode string.
path = bytestring_path(path)
@ -169,9 +169,9 @@ def sorted_walk(path, ignore=()):
else:
files.append(base)
# Sort lists and yield the current level.
dirs.sort()
files.sort()
# Sort lists (case-insensitive) and yield the current level.
dirs.sort(key=bytes.lower)
files.sort(key=bytes.lower)
yield (path, dirs, files)
# Recurse into directories.
@ -193,11 +193,25 @@ def mkdirall(path):
raise FilesystemError(exc, 'create', (ancestor,),
traceback.format_exc())
def fnmatch_all(names, patterns):
"""Determine whether all strings in `names` match at least one of
the `patterns`, which should be shell glob expressions.
"""
for name in names:
matches = False
for pattern in patterns:
matches = fnmatch.fnmatch(name, pattern)
if matches:
break
if not matches:
return False
return True
def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
"""If path is an empty directory, then remove it. Recursively remove
path's ancestry up to root (which is never removed) where there are
empty directories. If path is not contained in root, then nothing is
removed. Filenames in clutter are ignored when determining
removed. Glob patterns in clutter are ignored when determining
emptiness. If root is not provided, then only path may be removed
(i.e., no recursive removal).
"""
@ -224,8 +238,7 @@ def prune_dirs(path, root=None, clutter=('.DS_Store', 'Thumbs.db')):
if not os.path.exists(directory):
# Directory gone already.
continue
if all(fn in clutter for fn in os.listdir(directory)):
if fnmatch_all(os.listdir(directory), clutter):
# Directory contains only clutter (or nothing).
try:
shutil.rmtree(directory)
@ -295,11 +308,14 @@ def bytestring_path(path, pathmod=None):
except (UnicodeError, LookupError):
return path.encode('utf8')
def displayable_path(path):
def displayable_path(path, separator=u'; '):
"""Attempts to decode a bytestring path to a unicode object for the
purpose of displaying it to the user.
purpose of displaying it to the user. If the `path` argument is a
list or a tuple, the elements are joined with `separator`.
"""
if isinstance(path, unicode):
if isinstance(path, (list, tuple)):
return separator.join(displayable_path(p) for p in path)
elif isinstance(path, unicode):
return path
elif not isinstance(path, str):
# A non-string object: just get its unicode representation.
@ -478,35 +494,6 @@ def truncate_path(path, pathmod=None, length=MAX_FILENAME_LENGTH):
return pathmod.join(*out)
def sanitize_for_path(value, pathmod=None, key=None):
"""Sanitize the value for inclusion in a path: replace separators
with _, etc. Doesn't guarantee that the whole path will be valid;
you should still call sanitize_path on the complete path.
"""
pathmod = pathmod or os.path
if isinstance(value, basestring):
for sep in (pathmod.sep, pathmod.altsep):
if sep:
value = value.replace(sep, u'_')
elif key in ('track', 'tracktotal', 'disc', 'disctotal'):
# Pad indices with zeros.
value = u'%02i' % (value or 0)
elif key == 'year':
value = u'%04i' % (value or 0)
elif key in ('month', 'day'):
value = u'%02i' % (value or 0)
elif key == 'bitrate':
# Bitrate gets formatted as kbps.
value = u'%ikbps' % ((value or 0) // 1000)
elif key == 'samplerate':
# Sample rate formatted as kHz.
value = u'%ikHz' % ((value or 0) // 1000)
else:
value = unicode(value)
return value
def str2bool(value):
"""Returns a boolean reflecting a human-entered string."""
if value.lower() in ('yes', '1', 'true', 't', 'y'):
@ -614,3 +601,17 @@ def command_output(cmd):
if proc.returncode:
raise subprocess.CalledProcessError(proc.returncode, cmd)
return stdout
def max_filename_length(path, fallback=MAX_FILENAME_LENGTH):
"""Attempt to determine the maximum filename length for the
filesystem containing `path`. If it cannot be determined, return a
predetermined fallback value.
"""
if hasattr(os, 'statvfs'):
try:
res = os.statvfs(path)
except OSError:
return fallback
return res[9]
else:
return fallback

View file

@ -36,6 +36,8 @@ CONFIG_FILENAME = 'config.yaml'
DEFAULT_FILENAME = 'config_default.yaml'
ROOT_NAME = 'root'
YAML_TAB_PROBLEM = "found character '\\t' that cannot start any token"
# Utilities.
@ -81,9 +83,19 @@ class ConfigReadError(ConfigError):
def __init__(self, filename, reason=None):
self.filename = filename
self.reason = reason
message = 'file {0} could not be read'.format(filename)
if reason:
if isinstance(reason, yaml.scanner.ScannerError) and \
reason.problem == YAML_TAB_PROBLEM:
# Special-case error message for tab indentation in YAML markup.
message += ': found tab character at line {0}, column {1}'.format(
reason.problem_mark.line + 1,
reason.problem_mark.column + 1,
)
elif reason:
# Generic error message uses exception's message.
message += ': {0}'.format(reason)
super(ConfigReadError, self).__init__(message)
@ -345,7 +357,7 @@ class ConfigView(object):
if value not in choices:
raise ConfigValueError(
'{0} must be one of {1}, not {2}'.format(
self.name, repr(value), repr(list(choices))
self.name, repr(list(choices)), repr(value)
)
)

View file

@ -950,17 +950,19 @@ class Server(BaseServer):
# Searching.
tagtype_map = {
u'Artist': u'artist',
u'Album': u'album',
u'Title': u'title',
u'Track': u'track',
u'Artist': u'artist',
u'Album': u'album',
u'Title': u'title',
u'Track': u'track',
u'AlbumArtist': u'albumartist',
u'AlbumArtistSort': u'albumartist_sort',
# Name?
u'Genre': u'genre',
u'Date': u'year',
u'Composer': u'composer',
u'Genre': u'genre',
u'Date': u'year',
u'Composer': u'composer',
# Performer?
u'Disc': u'disc',
u'filename': u'path', # Suspect.
u'Disc': u'disc',
u'filename': u'path', # Suspect.
}
def cmd_tagtypes(self, conn):

View file

@ -57,6 +57,11 @@ def fetch_item_tempo(lib, loglevel, item, write):
def get_tempo(artist, title):
"""Get the tempo for a song."""
# We must have sufficient metadata for the lookup. Otherwise the API
# will just complain.
if not artist or not title:
return None
for i in range(RETRIES):
try:
# Unfortunately, all we can do is search by artist and title.

View file

@ -162,7 +162,7 @@ def _source_urls(album):
if url:
yield url
def art_for_album(album, path, maxwidth=None, local_only=False):
def art_for_album(album, paths, maxwidth=None, local_only=False):
"""Given an Album object, returns a path to downloaded art for the
album (or None if no art is found). If `maxwidth`, then images are
resized to this maximum pixel size. If `local_only`, then only local
@ -172,8 +172,11 @@ def art_for_album(album, path, maxwidth=None, local_only=False):
out = None
# Local art.
if isinstance(path, basestring):
out = art_in_path(path)
if paths:
for path in paths:
out = art_in_path(path)
if out:
break
# Web art sources.
if not local_only and not out:
@ -243,7 +246,7 @@ class FetchArtPlugin(BeetsPlugin):
return
album = session.lib.get_album(task.album_id)
path = art_for_album(album, task.path, self.maxwidth, local)
path = art_for_album(album, task.paths, self.maxwidth, local)
if path:
self.art_paths[task] = path

View file

@ -84,10 +84,11 @@ def _write_m3u(m3u_path, items_paths):
def _record_items(lib, basename, items):
"""Records relative paths to the given items for each feed format
"""
feedsdir = config['importfeeds']['dir'].as_filename()
feedsdir = bytestring_path(config['importfeeds']['dir'].as_filename())
formats = config['importfeeds']['formats'].as_str_seq()
relative_to = config['importfeeds']['relative_to'].get() \
or config['importfeeds']['dir'].as_filename()
relative_to = bytestring_path(relative_to)
paths = []
for item in items:
@ -96,7 +97,9 @@ def _record_items(lib, basename, items):
))
if 'm3u' in formats:
basename = config['importfeeds']['m3u_name'].get(unicode).encode('utf8')
basename = bytestring_path(
config['importfeeds']['m3u_name'].get(unicode)
)
m3u_path = os.path.join(feedsdir, basename)
_write_m3u(m3u_path, paths)
@ -106,9 +109,9 @@ def _record_items(lib, basename, items):
if 'link' in formats:
for path in paths:
dest = os.path.join(feedsdir, normpath(os.path.basename(path)))
if not os.path.exists(dest):
os.symlink(path, dest)
dest = os.path.join(feedsdir, os.path.basename(path))
if not os.path.exists(syspath(dest)):
os.symlink(syspath(path), syspath(dest))
@ImportFeedsPlugin.listen('library_opened')
def library_opened(lib):

View file

@ -89,8 +89,6 @@ def compile_inline(python_code):
return _func_func
class InlinePlugin(BeetsPlugin):
template_fields = {}
def __init__(self):
super(InlinePlugin, self).__init__()
@ -103,4 +101,4 @@ class InlinePlugin(BeetsPlugin):
log.debug(u'adding template field %s' % key)
func = compile_inline(view.get(unicode))
if func is not None:
InlinePlugin.template_fields[key] = func
self.template_fields[key] = func

View file

@ -31,8 +31,9 @@ import yaml
from beets import plugins
from beets import ui
from beets.util import normpath
from beets.util import normpath, plurality
from beets import config
from beets import library
log = logging.getLogger('beets')
@ -45,6 +46,9 @@ PYLAST_EXCEPTIONS = (
pylast.NetworkError,
)
# Core genre identification routine.
def _tags_for(obj):
"""Given a pylast entity (album or track), returns a list of
tag names for that entity. Returns an empty list if the entity is
@ -64,9 +68,29 @@ def _tags_for(obj):
log.debug(u'last.fm tags: %s' % unicode(tags))
return tags
def _tags_to_genre(tags):
"""Given a tag list, returns a genre. Returns the first tag that is
present in the genre whitelist or None if no tag is suitable.
def _is_allowed(genre):
"""Determine whether the genre is present in the whitelist,
returning a boolean.
"""
if genre is None:
return False
if genre.lower() in options['whitelist']:
return True
return False
def _find_allowed(genres):
"""Return the first string in the sequence `genres` that is present
in the genre whitelist or None if no genre is suitable.
"""
for genre in list(genres):
if _is_allowed(genre):
return genre.title() # Title case.
return None
def _strings_to_genre(tags):
"""Given a list of strings, return a genre. Returns the first string
that is present in the genre whitelist (or the canonicalization
tree) or None if no tag is suitable.
"""
if not tags:
return None
@ -76,12 +100,19 @@ def _tags_to_genre(tags):
if options.get('c14n'):
# Use the canonicalization tree.
for tag in tags:
genre = find_allowed(find_parents(tag, options['branches']))
if genre:
return genre
return _find_allowed(find_parents(tag, options['branches']))
else:
# Just use the flat whitelist.
return find_allowed(tags)
return _find_allowed(tags)
def fetch_genre(lastfm_obj):
"""Return the genre for a pylast entity or None if no suitable genre
can be found.
"""
return _strings_to_genre(_tags_for(lastfm_obj))
# Canonicalization tree processing.
def flatten_tree(elem, path, branches):
"""Flatten nested lists/dictionaries into lists of strings
@ -111,14 +142,51 @@ def find_parents(candidate, branches):
continue
return [candidate]
def find_allowed(genres):
"""Returns the first genre that is present in the genre whitelist or
None if no genre is suitable.
# Cached entity lookups.
_genre_cache = {}
def _cached_lookup(entity, method, *args):
"""Get a genre based on the named entity using the callable `method`
whose arguments are given in the sequence `args`. The genre lookup
is cached based on the entity name and the arguments.
"""
for genre in list(genres):
if genre.lower() in options['whitelist']:
return genre.title()
return None
# Shortcut if we're missing metadata.
if any(not s for s in args):
return None
key = u'{0}.{1}'.format(entity, u'-'.join(unicode(a) for a in args))
if key in _genre_cache:
return _genre_cache[key]
else:
genre = fetch_genre(method(*args))
_genre_cache[key] = genre
return genre
def fetch_album_genre(obj):
    """Return the album genre for this Item or Album."""
    lookup_args = (obj.albumartist, obj.album)
    return _cached_lookup(u'album', LASTFM.get_album, *lookup_args)
def fetch_album_artist_genre(obj):
    """Return the album artist genre for this Item or Album."""
    artist_name = obj.albumartist
    return _cached_lookup(u'artist', LASTFM.get_artist, artist_name)
def fetch_artist_genre(item):
    """Return the track artist genre for this Item."""
    artist_name = item.artist
    return _cached_lookup(u'artist', LASTFM.get_artist, artist_name)
def fetch_track_genre(obj):
    """Return the track genre for this Item."""
    lookup_args = (obj.artist, obj.title)
    return _cached_lookup(u'track', LASTFM.get_track, *lookup_args)
# Main plugin logic.
options = {
'whitelist': None,
@ -128,14 +196,18 @@ options = {
class LastGenrePlugin(plugins.BeetsPlugin):
def __init__(self):
super(LastGenrePlugin, self).__init__()
self.import_stages = [self.imported]
self.config.add({
'whitelist': os.path.join(os.path.dirname(__file__), 'genres.txt'),
'fallback': None,
'canonical': None,
'source': 'album',
'force': False,
'auto': True,
})
if self.config['auto']:
self.import_stages = [self.imported]
# Read the whitelist file.
wl_filename = self.config['whitelist'].as_filename()
@ -161,62 +233,139 @@ class LastGenrePlugin(plugins.BeetsPlugin):
options['branches'] = branches
options['c14n'] = True
@property
def sources(self):
    """A tuple of allowed genre sources. May contain 'track',
    'album', or 'artist.'
    """
    choice = self.config['source'].as_choice(('track', 'album', 'artist'))
    # Each configured source implies all coarser-grained sources after it.
    implied = {
        'track': ('track', 'album', 'artist'),
        'album': ('album', 'artist'),
        'artist': ('artist',),
    }
    return implied[choice]
def _get_genre(self, obj):
    """Get the genre string for an Album or Item object based on
    self.sources. Return a `(genre, source)` pair. The
    prioritization order is:
    - track (for Items only)
    - album
    - artist
    - original
    - fallback
    - None

    `source` names which stage produced the genre ('keep', 'track',
    'album', 'artist', 'original', 'fallback') or None when nothing
    was found.
    """
    # Shortcut to existing genre if not forcing.
    # If the object's current genre is already whitelisted, keep it.
    if not self.config['force'] and _is_allowed(obj.genre):
        return obj.genre, 'keep'
    # Track genre (for Items only).
    if isinstance(obj, library.Item):
        if 'track' in self.sources:
            result = fetch_track_genre(obj)
            if result:
                return result, 'track'
    # Album genre.
    if 'album' in self.sources:
        result = fetch_album_genre(obj)
        if result:
            return result, 'album'
    # Artist (or album artist) genre.
    if 'artist' in self.sources:
        result = None
        if isinstance(obj, library.Item):
            result = fetch_artist_genre(obj)
        elif obj.albumartist != 'Various Artists':
            result = fetch_album_artist_genre(obj)
        else:
            # For "Various Artists", pick the most popular track genre.
            # Try each track's own genre first, then its artist's genre.
            item_genres = []
            for item in obj.items():
                item_genre = None
                if 'track' in self.sources:
                    item_genre = fetch_track_genre(item)
                if not item_genre:
                    item_genre = fetch_artist_genre(item)
                if item_genre:
                    item_genres.append(item_genre)
            if item_genres:
                # plurality picks the most common genre among the tracks.
                result, _ = plurality(item_genres)
        if result:
            return result, 'artist'
    # Filter the existing genre.
    # The existing (non-whitelisted) genre may still canonicalize to
    # something acceptable.
    result = _strings_to_genre([obj.genre])
    if result:
        return result, 'original'
    # Fallback string.
    fallback = self.config['fallback'].get()
    if fallback:
        return fallback, 'fallback'
    return None, None
def commands(self):
    """Provide the `beet lastgenre` CLI command for fetching genres
    manually for albums matching a query.
    """
    # NOTE(review): the body previously interleaved the superseded
    # implementation (direct LASTFM lookups, `_tags_to_genre`, duplicated
    # fallback handling and `item.write()` calls); only the `_get_genre`
    # based version is kept.
    lastgenre_cmd = ui.Subcommand('lastgenre', help='fetch genres')
    lastgenre_cmd.parser.add_option('-f', '--force', dest='force',
                        action='store_true',
                        help='re-download genre when already present')
    lastgenre_cmd.parser.add_option('-s', '--source', dest='source',
                        type='string',
                        help='genre source: artist, album, or track')

    def lastgenre_func(lib, opts, args):
        # The "write to files" option corresponds to the
        # import_write config value.
        write = config['import']['write'].get(bool)
        # Let command-line flags override the plugin configuration.
        self.config.set_args(opts)

        for album in lib.albums(ui.decargs(args)):
            album.genre, src = self._get_genre(album)
            log.info(u'genre for album {0} - {1} ({2}): {3}'.format(
                album.albumartist, album.album, src, album.genre
            ))

            for item in album.items():
                # If we're using track-level sources, also look up each
                # track on the album.
                if 'track' in self.sources:
                    item.genre, src = self._get_genre(item)
                    lib.store(item)
                    log.info(u'genre for track {0} - {1} ({2}): {3}'.format(
                        item.artist, item.title, src, item.genre
                    ))

                if write:
                    item.write()

    lastgenre_cmd.func = lastgenre_func
    return [lastgenre_cmd]
def imported(self, session, task):
    """Event hook called when an import task finishes: look up and store
    genres for the imported album (and, optionally, its tracks) or for
    the imported singleton item.
    """
    # NOTE(review): the body previously interleaved the superseded
    # implementation (tag accumulation, direct LASTFM calls, manual
    # fallback handling); only the `_get_genre` based version is kept.
    # Always force a "real" lookup during import.
    if not self.config['force']:
        self.config['force'] = True

    if task.is_album:
        album = session.lib.get_album(task.album_id)
        album.genre, src = self._get_genre(album)
        log.debug(u'added last.fm album genre ({0}): {1}'.format(
            src, album.genre))

        if 'track' in self.sources:
            for item in album.items():
                item.genre, src = self._get_genre(item)
                log.debug(u'added last.fm item genre ({0}): {1}'.format(
                    src, item.genre))
                session.lib.store(item)
    else:
        item = task.item
        item.genre, src = self._get_genre(item)
        log.debug(u'added last.fm item genre ({0}): {1}'.format(
            src, item.genre))
        session.lib.store(item)

View file

@ -44,7 +44,6 @@ def rewriter(field, rules):
class RewritePlugin(BeetsPlugin):
def __init__(self):
super(RewritePlugin, self).__init__()
BeetsPlugin.template_fields = {}
self.config.add({})
@ -68,5 +67,4 @@ class RewritePlugin(BeetsPlugin):
# Replace each template field with the new rewriter function.
for fieldname, fieldrules in rules.iteritems():
RewritePlugin.template_fields[fieldname] = \
rewriter(fieldname, fieldrules)
self.template_fields[fieldname] = rewriter(fieldname, fieldrules)

View file

@ -4,35 +4,97 @@ Changelog
1.1b2 (in development)
----------------------
This version introduces one **change to the default behavior** that you should
be aware of. Previously, when importing new albums matched in MusicBrainz, the
date fields (``year``, ``month``, and ``day``) would be set to the release date
of the *original* version of the album, as opposed to the specific date of the
release selected. Now, these fields reflect the specific release and
``original_year``, etc., reflect the earlier release date. If you want the old
behavior, just set :ref:`original_date` to true in your config file.
New configuration options:
* :ref:`default_action` lets you determine what the default (just-hit-return)
option is when considering a candidate.
* :ref:`none_rec_action` lets you skip the prompt, and automatically choose an
action, when there is no good candidate. Thanks to mrmachine.
action, when there is no good candidate. Thanks to Tai Lee.
* :ref:`max_rec` lets you define a maximum recommendation for albums with
missing/extra tracks or differing track lengths/numbers. Thanks again to Tai
Lee.
* :ref:`original_date` determines whether, when importing new albums, the
``year``, ``month``, and ``day`` fields should reflect the specific (e.g.,
reissue) release date or the original release date. Note that the original
release date is always available as ``original_year``, etc.
* :ref:`clutter` controls which files should be ignored when cleaning up empty
directories. Thanks to Steinþór Pálsson.
* :doc:`/plugins/lastgenre`: A new configuration option lets you choose to
retrieve artist-level tags as genres instead of album- or track-level tags.
Thanks to Peter Fern and Peter Schnebel.
* :ref:`max_filename_length` controls truncation of long filenames. Also, beets
now tries to determine the filesystem's maximum length automatically if you
leave this option unset.
* You can now customize the character substituted for path separators (e.g., /)
in filenames via ``path_sep_replace``. The default is an underscore. Use this
setting with caution.
Other new stuff:
* Support for Windows Media/ASF audio files. Thanks to Dave Hayes.
* New :doc:`/plugins/smartplaylist`: generate and maintain m3u playlist files
based on beets queries. Thanks to Dang Mai Hai.
* Two new plugin events were added: *database_change* and *cli_exit*. Thanks
again to Dang Mai Hai.
* Track titles in the importer's difference display are now broken across two
lines for readability. Thanks to mrmachine.
* ReplayGain tags on MPEG-4/AAC files are now supported. And, even more
astonishingly, ReplayGain values in MP3 and AAC files are now compatible with
`iTunes Sound Check`_. Thanks to Dave Hayes.
* Track titles in the importer UI's difference display are now either aligned
vertically or broken across two lines for readability. Thanks to Tai Lee.
* Albums and items have new fields reflecting the *original* release date
(``original_year``, ``original_month``, and ``original_day``). Previously,
when tagging from MusicBrainz, *only* the original date was stored; now, the
old fields refer to the *specific* release date (e.g., when the album was
reissued).
* Some changes to the way candidates are recommended for selection, thanks to
mrmachine:
Tai Lee:
* Partial album matches are never "strong" recommendations.
* According to the new :ref:`max_rec` configuration option, partial album
matches are downgraded to a "low" recommendation by default.
* When a match isn't great but is either better than all the others or the
only match, it is given a "low" (rather than "medium") recommendation.
* There is no prompt default (i.e., input is required) when matches are
bad: "low" or "none" recommendations or when choosing a candidate
other than the first.
* Album listings in the importer UI now show the release medium (CD, LP,
etc.). Thanks to Peter Schnebel.
* The importer's heuristic for coalescing the directories in a multi-disc album
has been improved. It can now detect when two directories alongside each
other share a similar prefix but a different number (e.g., "Album Disc 1" and
"Album Disc 2") even when they are not alone in a common parent directory.
Thanks once again to Tai Lee.
* Album listings in the importer UI now show the release medium (CD, Vinyl,
3xCD, etc.) as well as the disambiguation string. Thanks to Peter Schnebel.
* :doc:`/plugins/lastgenre`: The plugin can now get different genres for
individual tracks on an album. Thanks to Peter Schnebel.
* When getting data from MusicBrainz, the album disambiguation string
(``albumdisambig``) now reflects both the release and the release group.
* :doc:`/plugins/mpdupdate`: Sends an update message whenever *anything* in the
database changes---not just when importing. Thanks to Dang Mai Hai.
* When the importer UI shows a difference in track numbers or durations, they
are now colorized based on the *suffixes* that differ. For example, when
showing the difference between 2:01 and 2:09, only the last digit will be
highlighted.
* The importer UI no longer shows a change when the track length difference is
less than 10 seconds. (This threshold was previously 2 seconds.)
* Two new plugin events were added: *database_change* and *cli_exit*. Thanks
again to Dang Mai Hai.
* Plugins are now loaded in the order they appear in the config file. Thanks to
Dang Mai Hai.
* :doc:`/plugins/bpd`: Browse by album artist and album artist sort name.
Thanks to Steinþór Pálsson.
* :doc:`/plugins/echonest_tempo`: Don't attempt a lookup when the artist or
track title is missing.
* Fix an error when migrating the ``.beetsstate`` file on Windows.
* A nicer error message is now given when the configuration file contains tabs.
(YAML doesn't like tabs.)
.. _iTunes Sound Check: http://support.apple.com/kb/HT2425
1.1b1 (January 29, 2013)
------------------------

View file

@ -47,11 +47,9 @@ all of these limitations.
currently be autotaggable. (This will change eventually.)
There is one exception to this rule: directories that look like separate parts
of a *multi-disc album* are tagged together as a single release. This
situation is detected by looking at the names of directories. If one directory
has sub-directories with, for example, "disc 1" and "disc 2" in their names,
they get lumped together as a single album. The marker words for this feature
are "part", "volume", "vol.", "disc", and "CD".
of a *multi-disc album* are tagged together as a single release. If two
adjacent albums have a common prefix, followed by "disc" or "CD" and then a
number, they are tagged together.
* The music may have bad tags, but it's not completely untagged. (This is
actually not a hard-and-fast rule: using the *E* option described below, it's

View file

@ -51,8 +51,7 @@ be turned into coarser-grained ones that are present in the whitelist. This
works using a tree of nested genre names, represented using `YAML`_, where the
leaves of the tree represent the most specific genres.
To enable canonicalization, first install the `pyyaml`_ module (``pip install
pyyaml``). Then set the ``canonical`` configuration value::
To enable canonicalization, set the ``canonical`` configuration value::
lastgenre:
canonical: ''
@ -62,7 +61,21 @@ tree. You can also set it to a path, just like the ``whitelist`` config value,
to use your own tree.
.. _YAML: http://www.yaml.org/
.. _pyyaml: http://pyyaml.org/
Genre Source
------------
When looking up genres for albums or individual tracks, you can choose whether
to use Last.fm tags on the album, the artist, or the track. For example, you
might want all the albums for a certain artist to carry the same genre. Set the
``source`` configuration value to "album", "track", or "artist", like so::
lastgenre:
source: artist
The default is "album". When set to "track", the plugin will fetch *both*
album-level and track-level genres for your music when importing albums.
Running Manually
@ -71,3 +84,6 @@ Running Manually
In addition to running automatically on import, the plugin can also run manually
from the command line. Use the command ``beet lastgenre [QUERY]`` to fetch
genres for albums matching a certain query.
To disable automatic genre fetching on import, set the ``auto`` config option
to false.

View file

@ -49,3 +49,8 @@ from the command line::
$ beet splupdate
which will generate your new smart playlists.
You can also use this plugin together with the :doc:`mpdupdate`, in order to
automatically notify MPD of the playlist change, by adding ``mpdupdate`` to
the ``plugins`` line in your config file *after* the ``smartplaylist``
plugin.

View file

@ -23,8 +23,9 @@ file will look like this::
key: value
foo: bar
If you have questions about more sophisticated syntax, take a look at the
`YAML`_ documentation.
In YAML, you will need to use spaces (not tabs!) to indent some lines. If you
have questions about more sophisticated syntax, take a look at the `YAML`_
documentation.
.. _YAML: http://yaml.org/
@ -148,6 +149,17 @@ Format to use when listing *albums* with :ref:`list-cmd` and other
commands. Defaults to ``$albumartist - $album``. The ``-f`` command-line
option overrides this setting.
.. _original_date:
original_date
~~~~~~~~~~~~~
Either ``yes`` or ``no``, indicating whether matched albums should have their
``year``, ``month``, and ``day`` fields set to the release date of the
*original* version of an album rather than the selected version of the release.
That is, if this option is turned on, then ``year`` will always equal
``original_year`` and so on. Default: ``no``.
.. _per_disc_numbering:
per_disc_numbering
@ -178,6 +190,25 @@ environment variables.
.. _known to python: http://docs.python.org/2/library/codecs.html#standard-encodings
.. _clutter:
clutter
~~~~~~~
When beets imports all the files in a directory, it tries to remove the
directory if it's empty. A directory is considered empty if it only contains
files whose names match the glob patterns in `clutter`, which should be a list
of strings. The default list consists of "Thumbs.DB" and ".DS_Store".
.. _max_filename_length:
max_filename_length
~~~~~~~~~~~~~~~~~~~
Set the maximum number of characters in a filename, after which names will be
truncated. By default, beets tries to ask the filesystem for the correct
maximum.
Importer Options
----------------
@ -335,6 +366,34 @@ and the next-best match is above the *gap* threshold, the importer will suggest
that match but not automatically confirm it. Otherwise, you'll see a list of
options to choose from.
.. _max_rec:
max_rec
~~~~~~~
As mentioned above, autotagger matches have *recommendations* that control how
the UI behaves for a certain quality of match. The recommendation for a certain
match is usually based on the distance calculation. But you can also control
the recommendation for certain specific situations by defining *maximum*
recommendations when (a) a match has missing/extra tracks; (b) the track number
for at least one track differs; or (c) the track length for at least one track
differs.
To define maxima, use keys under ``max_rec:`` in the ``match`` section::
match:
max_rec:
partial: medium
tracklength: strong
tracknumber: strong
If a recommendation is higher than the configured maximum and the condition is
met, the recommendation will be downgraded. The maximum for each condition can
be one of ``none``, ``low``, ``medium`` or ``strong``. When the maximum
recommendation is ``strong``, no "downgrading" occurs for that situation.
The above example shows the default ``max_rec`` settings.
.. _path-format-config:
Path Format Configuration

View file

@ -170,10 +170,9 @@ Ordinary metadata:
* genre
* composer
* grouping
* year
* month
* day
* track
* year, month, day: The release date of the specific release.
* original_year, original_month, original_day: The release date of the original
version of the album.
* tracktotal
* disc
* disctotal

Binary file not shown.

View file

@ -105,26 +105,26 @@ class CombinedTest(unittest.TestCase):
_common.touch(os.path.join(self.dpath, 'a.jpg'))
fetchart.urllib.urlretrieve = MockUrlRetrieve('image/jpeg')
album = _common.Bag(asin='xxxx')
artpath = fetchart.art_for_album(album, self.dpath)
artpath = fetchart.art_for_album(album, [self.dpath])
self.assertEqual(artpath, os.path.join(self.dpath, 'a.jpg'))
def test_main_interface_falls_back_to_amazon(self):
fetchart.urllib.urlretrieve = MockUrlRetrieve('image/jpeg')
album = _common.Bag(asin='xxxx')
artpath = fetchart.art_for_album(album, self.dpath)
artpath = fetchart.art_for_album(album, [self.dpath])
self.assertNotEqual(artpath, None)
self.assertFalse(artpath.startswith(self.dpath))
def test_main_interface_tries_amazon_before_aao(self):
fetchart.urllib.urlretrieve = MockUrlRetrieve('image/jpeg')
album = _common.Bag(asin='xxxx')
fetchart.art_for_album(album, self.dpath)
fetchart.art_for_album(album, [self.dpath])
self.assertFalse(self.urlopen_called)
def test_main_interface_falls_back_to_aao(self):
fetchart.urllib.urlretrieve = MockUrlRetrieve('text/html')
album = _common.Bag(asin='xxxx')
fetchart.art_for_album(album, self.dpath)
fetchart.art_for_album(album, [self.dpath])
self.assertTrue(self.urlopen_called)
def test_main_interface_uses_caa_when_mbid_available(self):
@ -139,7 +139,7 @@ class CombinedTest(unittest.TestCase):
mock_retrieve = MockUrlRetrieve('image/jpeg')
fetchart.urllib.urlretrieve = mock_retrieve
album = _common.Bag(mb_albumid='releaseid', asin='xxxx')
artpath = fetchart.art_for_album(album, self.dpath, local_only=True)
artpath = fetchart.art_for_album(album, [self.dpath], local_only=True)
self.assertEqual(artpath, None)
self.assertFalse(self.urlopen_called)
self.assertFalse(mock_retrieve.fetched)
@ -149,7 +149,7 @@ class CombinedTest(unittest.TestCase):
mock_retrieve = MockUrlRetrieve('image/jpeg')
fetchart.urllib.urlretrieve = mock_retrieve
album = _common.Bag(mb_albumid='releaseid', asin='xxxx')
artpath = fetchart.art_for_album(album, self.dpath, local_only=True)
artpath = fetchart.art_for_album(album, [self.dpath], local_only=True)
self.assertEqual(artpath, os.path.join(self.dpath, 'a.jpg'))
self.assertFalse(self.urlopen_called)
self.assertFalse(mock_retrieve.fetched)

View file

@ -322,19 +322,34 @@ class MultiDiscAlbumsInDirTest(unittest.TestCase):
os.mkdir(self.base)
self.dirs = [
os.path.join(self.base, 'album1'),
os.path.join(self.base, 'album1', 'disc 1'),
os.path.join(self.base, 'album1', 'disc 2'),
os.path.join(self.base, 'dir2'),
os.path.join(self.base, 'dir2', 'disc 1'),
os.path.join(self.base, 'dir2', 'something'),
# Nested album, multiple subdirs.
# Also, false positive marker in root dir, and subtitle for disc 3.
os.path.join(self.base, 'ABCD1234'),
os.path.join(self.base, 'ABCD1234', 'cd 1'),
os.path.join(self.base, 'ABCD1234', 'cd 3 - bonus'),
# Nested album, single subdir.
# Also, punctuation between marker and disc number.
os.path.join(self.base, 'album'),
os.path.join(self.base, 'album', 'cd _ 1'),
# Flattened album, case typo.
# Also, false positive marker in parent dir.
os.path.join(self.base, 'artist [CD5]'),
os.path.join(self.base, 'artist [CD5]', 'CAT disc 1'),
os.path.join(self.base, 'artist [CD5]', 'CAt disc 2'),
# Single disc album, sorted between CAT discs.
os.path.join(self.base, 'artist [CD5]', 'CATS'),
]
self.files = [
os.path.join(self.base, 'album1', 'disc 1', 'song1.mp3'),
os.path.join(self.base, 'album1', 'disc 2', 'song2.mp3'),
os.path.join(self.base, 'album1', 'disc 2', 'song3.mp3'),
os.path.join(self.base, 'dir2', 'disc 1', 'song4.mp3'),
os.path.join(self.base, 'dir2', 'something', 'song5.mp3'),
os.path.join(self.base, 'ABCD1234', 'cd 1', 'song1.mp3'),
os.path.join(self.base, 'ABCD1234', 'cd 3 - bonus', 'song2.mp3'),
os.path.join(self.base, 'ABCD1234', 'cd 3 - bonus', 'song3.mp3'),
os.path.join(self.base, 'album', 'cd _ 1', 'song4.mp3'),
os.path.join(self.base, 'artist [CD5]', 'CAT disc 1', 'song5.mp3'),
os.path.join(self.base, 'artist [CD5]', 'CAt disc 2', 'song6.mp3'),
os.path.join(self.base, 'artist [CD5]', 'CATS', 'song7.mp3'),
]
for path in self.dirs:
@ -345,25 +360,35 @@ class MultiDiscAlbumsInDirTest(unittest.TestCase):
def tearDown(self):
    # Remove the temporary directory tree created in setUp.
    shutil.rmtree(self.base)
def test_coalesce_nested_album_multiple_subdirs(self):
    # The span previously mixed the superseded test
    # (`test_coalesce_multi_disc_album`, expecting 3 albums and a single
    # root path) with its replacement; only the new assertions are kept.
    albums = list(autotag.albums_in_dir(self.base))
    self.assertEquals(len(albums), 4)
    root, items = albums[0]
    self.assertEquals(root, self.dirs[0:3])
    self.assertEquals(len(items), 3)
def test_coalesce_nested_album_single_subdir(self):
    # The span previously mixed the superseded test
    # (`test_separate_red_herring`, asserting a single-path root) with
    # its replacement; only the new assertions are kept.
    albums = list(autotag.albums_in_dir(self.base))
    root, items = albums[1]
    self.assertEquals(root, self.dirs[3:5])
    self.assertEquals(len(items), 1)
def test_coalesce_flattened_album_case_typo(self):
    # The span previously contained both the old single-path assertion
    # and the new list-of-paths assertion; only the new one is kept.
    albums = list(autotag.albums_in_dir(self.base))
    root, items = albums[2]
    self.assertEquals(root, self.dirs[6:8])
    self.assertEquals(len(items), 2)
def test_single_disc_album(self):
    # A lone directory should be yielded on its own, uncoalesced.
    found = list(autotag.albums_in_dir(self.base))
    paths, items = found[3]
    self.assertEquals(paths, self.dirs[8:])
    self.assertEquals(len(items), 1)
def test_do_not_yield_empty_album(self):
    # Remove all the MP3s so no directory contains any media files.
    for path in self.files:
        os.remove(path)

    found = list(autotag.albums_in_dir(self.base))
    self.assertEquals(len(found), 0)

View file

@ -347,19 +347,19 @@ class DestinationTest(unittest.TestCase):
def test_component_sanitize_replaces_separators(self):
name = posixpath.join('a', 'b')
newname = util.sanitize_for_path(name, posixpath)
newname = beets.library.format_for_path(name, None, posixpath)
self.assertNotEqual(name, newname)
def test_component_sanitize_pads_with_zero(self):
name = util.sanitize_for_path(1, posixpath, 'track')
name = beets.library.format_for_path(1, 'track', posixpath)
self.assertTrue(name.startswith('0'))
def test_component_sanitize_uses_kbps_bitrate(self):
val = util.sanitize_for_path(12345, posixpath, 'bitrate')
val = beets.library.format_for_path(12345, 'bitrate', posixpath)
self.assertEqual(val, u'12kbps')
def test_component_sanitize_uses_khz_samplerate(self):
val = util.sanitize_for_path(12345, posixpath, 'samplerate')
val = beets.library.format_for_path(12345, 'samplerate', posixpath)
self.assertEqual(val, u'12kHz')
def test_artist_falls_back_to_albumartist(self):

View file

@ -332,6 +332,14 @@ class ImportApplyTest(_common.TestCase):
_call_stages(self.session, [self.i], self.info, toppath=self.srcdir)
self.assertNotExists(os.path.dirname(self.srcpath))
def test_apply_with_move_prunes_with_extra_clutter(self):
f = open(os.path.join(self.srcdir, 'testalbum', 'alog.log'), 'w')
f.close()
config['clutter'] = ['*.log']
config['import']['move'] = True
_call_stages(self.session, [self.i], self.info, toppath=self.srcdir)
self.assertNotExists(os.path.dirname(self.srcpath))
def test_manipulate_files_with_null_move(self):
"""It should be possible to "move" a file even when the file is
already at the destination.
@ -582,7 +590,7 @@ class InferAlbumDataTest(_common.TestCase):
i1.mb_albumartistid = i2.mb_albumartistid = i3.mb_albumartistid = ''
self.items = [i1, i2, i3]
self.task = importer.ImportTask(path='a path', toppath='top path',
self.task = importer.ImportTask(paths=['a path'], toppath='top path',
items=self.items)
self.task.set_null_candidates()
@ -677,7 +685,7 @@ class DuplicateCheckTest(_common.TestCase):
artist = artist or item.albumartist
album = album or item.album
task = importer.ImportTask(path='a path', toppath='top path',
task = importer.ImportTask(paths=['a path'], toppath='top path',
items=[item])
task.set_candidates(artist, album, None, None)
if asis:

View file

@ -23,11 +23,12 @@ class MBAlbumInfoTest(unittest.TestCase):
'title': 'ALBUM TITLE',
'id': 'ALBUM ID',
'asin': 'ALBUM ASIN',
'disambiguation': 'R_DISAMBIGUATION',
'release-group': {
'type': 'Album',
'first-release-date': date_str,
'id': 'RELEASE GROUP ID',
'disambiguation': 'DISAMBIGUATION',
'disambiguation': 'RG_DISAMBIGUATION',
},
'artist-credit': [
{
@ -94,7 +95,8 @@ class MBAlbumInfoTest(unittest.TestCase):
self.assertEqual(d.album_id, 'ALBUM ID')
self.assertEqual(d.artist, 'ARTIST NAME')
self.assertEqual(d.artist_id, 'ARTIST ID')
self.assertEqual(d.year, 1984)
self.assertEqual(d.original_year, 1984)
self.assertEqual(d.year, 3001)
self.assertEqual(d.artist_credit, 'ARTIST CREDIT')
def test_parse_release_type(self):
@ -105,9 +107,9 @@ class MBAlbumInfoTest(unittest.TestCase):
def test_parse_release_full_date(self):
release = self._make_release('1987-03-31')
d = mb.album_info(release)
self.assertEqual(d.year, 1987)
self.assertEqual(d.month, 3)
self.assertEqual(d.day, 31)
self.assertEqual(d.original_year, 1987)
self.assertEqual(d.original_month, 3)
self.assertEqual(d.original_day, 31)
def test_parse_tracks(self):
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),
@ -173,8 +175,8 @@ class MBAlbumInfoTest(unittest.TestCase):
def test_parse_release_year_month_only(self):
release = self._make_release('1987-03')
d = mb.album_info(release)
self.assertEqual(d.year, 1987)
self.assertEqual(d.month, 3)
self.assertEqual(d.original_year, 1987)
self.assertEqual(d.original_month, 3)
def test_no_durations(self):
tracks = [self._make_track('TITLE', 'ID', None)]
@ -185,9 +187,9 @@ class MBAlbumInfoTest(unittest.TestCase):
def test_no_release_date(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertFalse(d.year)
self.assertFalse(d.month)
self.assertFalse(d.day)
self.assertFalse(d.original_year)
self.assertFalse(d.original_month)
self.assertFalse(d.original_day)
def test_various_artists_defaults_false(self):
release = self._make_release(None)
@ -247,7 +249,8 @@ class MBAlbumInfoTest(unittest.TestCase):
def test_parse_disambig(self):
release = self._make_release(None)
d = mb.album_info(release)
self.assertEqual(d.albumdisambig, 'DISAMBIGUATION')
self.assertEqual(d.albumdisambig,
'RG_DISAMBIGUATION, R_DISAMBIGUATION')
def test_parse_disctitle(self):
tracks = [self._make_track('TITLE ONE', 'ID ONE', 100.0 * 1000.0),

View file

@ -120,6 +120,10 @@ CORRECT_DICTS = {
'albumdisambig': u'',
'artist_credit': u'',
'albumartist_credit': u'',
'original_year': 0,
'original_month': 0,
'original_day': 0,
'original_date': datetime.date.min,
},
# Full release date.

View file

@ -455,7 +455,7 @@ class AutotagTest(_common.TestCase):
'path',
[_common.item()],
)
task.set_candidates('artist', 'album', [], autotag.RECOMMEND_NONE)
task.set_candidates('artist', 'album', [], autotag.recommendation.none)
session = _common.import_session(cli=True)
res = session.choose_match(task)
self.assertEqual(res, result)
@ -687,12 +687,12 @@ class ShowChangeTest(_common.TestCase):
def test_item_data_change(self):
self.items[0].title = 'different'
msg = self._show_change()
self.assertTrue('different ->\n the title' in msg)
self.assertTrue('different -> the title' in msg)
def test_item_data_change_with_unicode(self):
self.items[0].title = u'caf\xe9'
msg = self._show_change()
self.assertTrue(u'caf\xe9 ->\n the title' in msg.decode('utf8'))
self.assertTrue(u'caf\xe9 -> the title' in msg.decode('utf8'))
def test_album_data_change_with_unicode(self):
msg = self._show_change(cur_artist=u'caf\xe9',
@ -701,14 +701,14 @@ class ShowChangeTest(_common.TestCase):
def test_item_data_change_title_missing(self):
self.items[0].title = ''
msg = self._show_change()
self.assertTrue('file.mp3 ->\n the title' in msg)
msg = re.sub(r' +', ' ', self._show_change())
self.assertTrue('file.mp3 -> the title' in msg)
def test_item_data_change_title_missing_with_unicode_filename(self):
self.items[0].title = ''
self.items[0].path = u'/path/to/caf\xe9.mp3'.encode('utf8')
msg = self._show_change().decode('utf8')
self.assertTrue(u'caf\xe9.mp3 ->' in msg
msg = re.sub(r' +', ' ', self._show_change().decode('utf8'))
self.assertTrue(u'caf\xe9.mp3 -> the title' in msg
or u'caf.mp3 ->' in msg)
class PathFormatTest(_common.TestCase):