a bit more flake8 bashing

Adrian Sampson 2014-04-12 14:03:00 -07:00
parent 91537bc3d1
commit 237458fbb6
5 changed files with 72 additions and 23 deletions

View file

@ -209,8 +209,10 @@ def string_dist(str1, str2):
an edit distance, normalized by the string length, with a number of
tweaks that reflect intuition about text.
"""
-    if str1 is None and str2 is None: return 0.0
-    if str1 is None or str2 is None: return 1.0
+    if str1 is None and str2 is None:
+        return 0.0
+    if str1 is None or str2 is None:
+        return 1.0
str1 = str1.lower()
str2 = str2.lower()
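The None guards above make the degenerate cases explicit: two missing strings compare as identical, one missing string as maximally different. A minimal usage sketch, assuming this first file is beets/autotag/hooks.py (one of the modules dropped from the flake8 exclude list at the end of this diff) and that string_dist is importable from it:

    from beets.autotag.hooks import string_dist

    assert string_dist(None, None) == 0.0           # both values missing: identical
    assert string_dist(None, u'Abbey Road') == 1.0  # one value missing: maximal distance
    # For real strings the result is an edit distance normalized into [0, 1],
    # adjusted by the heuristics mentioned in the docstring above.
    print(string_dist(u'Abbey Road', u'Abbey Road (Remastered)'))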
@ -284,7 +286,6 @@ class Distance(object):
def __init__(self):
self._penalties = {}
@LazyClassProperty
def _weights(cls):
"""A dictionary from keys to floating-point weights.
@ -295,7 +296,6 @@ class Distance(object):
weights[key] = weights_view[key].as_number()
return weights
# Access the components and their aggregates.
@property
@ -341,7 +341,6 @@ class Distance(object):
# still get the items with the biggest distance first.
return sorted(list_, key=lambda (key, dist): (0 - dist, key))
# Behave like a float.
def __cmp__(self, other):
@ -349,13 +348,13 @@ class Distance(object):
def __float__(self):
return self.distance
def __sub__(self, other):
return self.distance - other
def __rsub__(self, other):
return other - self.distance
# Behave like a dict.
def __getitem__(self, key):
@ -381,11 +380,11 @@ class Distance(object):
"""
if not isinstance(dist, Distance):
raise ValueError(
-                '`dist` must be a Distance object. It is: %r' % dist)
+                '`dist` must be a Distance object, not {0}'.format(type(dist))
+            )
for key, penalties in dist._penalties.iteritems():
self._penalties.setdefault(key, []).extend(penalties)
# Adding components.
def _eq(self, value1, value2):
@ -405,7 +404,8 @@ class Distance(object):
"""
if not 0.0 <= dist <= 1.0:
raise ValueError(
-                '`dist` must be between 0.0 and 1.0. It is: %r' % dist)
+                '`dist` must be between 0.0 and 1.0, not {0}'.format(dist)
+            )
self._penalties.setdefault(key, []).append(dist)
def add_equality(self, key, value, options):
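The hunks above show a Distance object that collects named penalty components and then acts like a plain float (comparison, float(), subtraction) as well as a dict of its components. A hedged sketch of how those pieces fit together; the method names add and update are not visible in the hunks, and the 'album'/'tracks' keys and the weights behind them are assumptions taken from the beets match configuration:

    from beets.autotag.hooks import Distance

    d = Distance()
    d.add('album', 0.5)            # append a raw 0.0-1.0 penalty under a key
    d.add_ratio('tracks', 2, 12)   # 2 of 12 -> roughly a 0.17 penalty

    overall = float(d)             # "behave like a float": weighted total
    album_component = d['album']   # "behave like a dict": per-key distance

    other = Distance()
    other.add('album', 0.25)
    d.update(other)                # merge penalties; non-Distance raises ValueError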

View file

@ -50,14 +50,17 @@ class OrderedEnum(enum.Enum):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
@ -100,6 +103,7 @@ def current_metadata(items):
return likelies, consensus
def assign_items(items, tracks):
"""Given a list of Items and a list of TrackInfo objects, find the
best mapping between them. Returns a mapping from Items to TrackInfo
@ -126,12 +130,14 @@ def assign_items(items, tracks):
extra_tracks.sort(key=lambda t: (t.index, t.title))
return mapping, extra_items, extra_tracks
def track_index_changed(item, track_info):
"""Returns True if the item and track info index is different. Tolerates
per disc and per release numbering.
"""
return item.track not in (track_info.medium_index, track_info.index)
def track_distance(item, track_info, incl_artist=False):
"""Determines the significance of a track metadata change. Returns a
Distance object. `incl_artist` indicates that a distance component should
@ -142,7 +148,7 @@ def track_distance(item, track_info, incl_artist=False):
# Length.
if track_info.length:
diff = abs(item.length - track_info.length) - \
- config['match']['track_length_grace'].as_number()
+ config['match']['track_length_grace'].as_number()
dist.add_ratio('track_length', diff,
config['match']['track_length_max'].as_number())
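The length component subtracts a grace period before turning the remaining difference into a ratio penalty. A worked example, assuming the stock defaults of 10 seconds for track_length_grace and 30 seconds for track_length_max (the lengths here are hypothetical):

    item_length = 196.0        # hypothetical length of the tagged file, seconds
    track_info_length = 180.0  # hypothetical MusicBrainz recording length

    grace, max_diff = 10.0, 30.0
    diff = abs(item_length - track_info_length) - grace   # 16 - 10 = 6
    penalty = max(0.0, min(diff / max_diff, 1.0))          # 6 / 30 = 0.2
    # dist.add_ratio('track_length', diff, max_diff) records roughly this 0.2
    # penalty; differences inside the grace period contribute nothing.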
@ -167,6 +173,7 @@ def track_distance(item, track_info, incl_artist=False):
return dist
def distance(items, album_info, mapping):
"""Determines how "significant" an album metadata change would be.
Returns a Distance object. `album_info` is an AlbumInfo object
@ -272,6 +279,7 @@ def distance(items, album_info, mapping):
return dist
def match_by_id(items):
"""If the items are tagged with a MusicBrainz album ID, returns an
AlbumInfo object for the corresponding album. Otherwise, returns
@ -284,13 +292,14 @@ def match_by_id(items):
return None
# If all album IDs are equal, look up the album.
-    if bool(reduce(lambda x,y: x if x == y else (), albumids)):
+    if bool(reduce(lambda x, y: x if x == y else (), albumids)):
albumid = albumids[0]
log.debug('Searching for discovered album ID: ' + albumid)
return hooks.album_for_mbid(albumid)
else:
log.debug('No album ID consensus.')
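The reduce() call above is a compact all-equal test: it keeps carrying the album ID forward only while each successive ID matches, and collapses to an empty (falsy) tuple on the first mismatch. A small sketch with hypothetical IDs:

    from functools import reduce  # a builtin in the Python 2 code above

    albumids = ['9e873859-8aa4-4790-b985-5a953e8ef628'] * 3   # hypothetical MBIDs
    assert bool(reduce(lambda x, y: x if x == y else (), albumids))

    albumids[1] = '00000000-0000-0000-0000-000000000000'      # one item disagrees
    assert not bool(reduce(lambda x, y: x if x == y else (), albumids))
    # For a non-empty list this is equivalent to: len(set(albumids)) == 1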
def _recommendation(results):
"""Given a sorted list of AlbumMatch or TrackMatch objects, return a
recommendation based on the results' distances.
@ -341,6 +350,7 @@ def _recommendation(results):
return rec
def _add_candidate(items, results, info):
"""Given a candidate AlbumInfo object, attempt to add the candidate
to the output dictionary of AlbumMatch objects. This involves
@ -371,6 +381,7 @@ def _add_candidate(items, results, info):
results[info.album_id] = hooks.AlbumMatch(dist, info, mapping,
extra_items, extra_tracks)
def tag_album(items, search_artist=None, search_album=None,
search_id=None):
"""Bundles together the functionality used to infer tags for a
@ -422,8 +433,8 @@ def tag_album(items, search_artist=None, search_album=None,
# Is this album likely to be a "various artist" release?
va_likely = ((not consensus['artist']) or
- (search_artist.lower() in VA_ARTISTS) or
- any(item.comp for item in items))
+ (search_artist.lower() in VA_ARTISTS) or
+ any(item.comp for item in items))
log.debug(u'Album might be VA: %s' % str(va_likely))
# Get the results from the data sources.
@ -439,6 +450,7 @@ def tag_album(items, search_artist=None, search_album=None,
rec = _recommendation(candidates)
return cur_artist, cur_album, candidates, rec
def tag_item(item, search_artist=None, search_title=None,
search_id=None):
"""Attempts to find metadata for a single track. Returns a
@ -458,7 +470,7 @@ def tag_item(item, search_artist=None, search_title=None,
for track_info in hooks.tracks_for_id(trackid):
dist = track_distance(item, track_info, incl_artist=True)
candidates[track_info.track_id] = \
- hooks.TrackMatch(dist, track_info)
+ hooks.TrackMatch(dist, track_info)
# If this is a good match, then don't keep searching.
rec = _recommendation(candidates.values())
if rec == Recommendation.strong and not config['import']['timid']:

View file

@ -32,6 +32,7 @@ BASE_URL = 'http://musicbrainz.org/'
musicbrainzngs.set_useragent('beets', beets.__version__,
'http://beets.radbox.org/')
class MusicBrainzAPIError(util.HumanReadableException):
"""An error while talking to MusicBrainz. The `query` field is the
parameter to the action and may have any type.
@ -51,12 +52,15 @@ RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups',
'labels', 'artist-credits', 'aliases']
TRACK_INCLUDES = ['artists', 'aliases']
def track_url(trackid):
return urljoin(BASE_URL, 'recording/' + trackid)
def album_url(albumid):
return urljoin(BASE_URL, 'release/' + albumid)
def configure():
"""Set up the python-musicbrainz-ngs module according to settings
from the beets configuration. This should be called at startup.
@ -67,6 +71,7 @@ def configure():
config['musicbrainz']['ratelimit'].get(int),
)
def _preferred_alias(aliases):
"""Given an list of alias structures for an artist credit, select
and return the user's preferred alias alias or None if no matching
@ -81,13 +86,15 @@ def _preferred_alias(aliases):
# Search configured locales in order.
for locale in config['import']['languages'].as_str_seq():
# Find matching primary aliases for this locale.
-        matches = [a for a in aliases if a['locale'] == locale and 'primary' in a]
+        matches = [a for a in aliases
+                   if a['locale'] == locale and 'primary' in a]
# Skip to the next locale if we have no matches
if not matches:
continue
return matches[0]
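The loop above walks the configured locales in priority order and returns the first alias that matches the locale and carries a 'primary' flag. A sketch with hypothetical alias dictionaries shaped like python-musicbrainz-ngs output (any key other than 'locale' and 'primary' is an assumption here):

    aliases = [
        {'alias': u'Chaikovski', 'locale': 'es', 'primary': 'primary'},
        {'alias': u'Tchaikovsky', 'locale': 'en', 'primary': 'primary'},
        {'alias': u'Pyotr Ilyich Tchaikovsky', 'locale': 'en'},  # not primary
    ]
    # With config['import']['languages'] set to ['en'], the comprehension keeps
    # only primary 'en' aliases, so the u'Tchaikovsky' entry is returned.
    # With ['fr'] nothing matches any locale and the function returns None.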
def _flatten_artist_credit(credit):
"""Given a list representing an ``artist-credit`` block, flatten the
data into a triple of joined artist name strings: canonical, sort, and
@ -133,6 +140,7 @@ def _flatten_artist_credit(credit):
''.join(artist_credit_parts),
)
def track_info(recording, index=None, medium=None, medium_index=None,
medium_total=None):
"""Translates a MusicBrainz recording result dictionary into a beets
@ -167,6 +175,7 @@ def track_info(recording, index=None, medium=None, medium_index=None,
info.decode()
return info
def _set_date_str(info, date_str, original=False):
"""Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo
object, set the object's release date fields appropriately. If
@ -186,6 +195,7 @@ def _set_date_str(info, date_str, original=False):
key = 'original_' + key
setattr(info, key, date_num)
def album_info(release):
"""Takes a MusicBrainz release result dictionary and returns a beets
AlbumInfo object containing the interesting data about that release.
@ -288,6 +298,7 @@ def album_info(release):
info.decode()
return info
def match_album(artist, album, tracks=None, limit=SEARCH_LIMIT):
"""Searches for a single album ("release" in MusicBrainz parlance)
and returns an iterator over AlbumInfo objects. May raise a
@ -322,6 +333,7 @@ def match_album(artist, album, tracks=None, limit=SEARCH_LIMIT):
if albuminfo is not None:
yield albuminfo
def match_track(artist, title, limit=SEARCH_LIMIT):
"""Searches for a single track and returns an iterable of TrackInfo
objects. May raise a MusicBrainzAPIError.
@ -342,6 +354,7 @@ def match_track(artist, title, limit=SEARCH_LIMIT):
for recording in res['recording-list']:
yield track_info(recording)
def _parse_id(s):
"""Search for a MusicBrainz ID in the given string and return it. If
no ID can be found, return None.
@ -351,6 +364,7 @@ def _parse_id(s):
if match:
return match.group()
def album_for_id(releaseid):
"""Fetches an album by its MusicBrainz ID and returns an AlbumInfo
object or None if the album is not found. May raise a
@ -371,6 +385,7 @@ def album_for_id(releaseid):
traceback.format_exc())
return album_info(res['release'])
def track_for_id(releaseid):
"""Fetches a track by its MusicBrainz ID. Returns a TrackInfo object
or None if no track is found. May raise a MusicBrainzAPIError.

View file

@ -41,10 +41,13 @@ action = Enum('action',
QUEUE_SIZE = 128
SINGLE_ARTIST_THRESH = 0.25
VARIOUS_ARTISTS = u'Various Artists'
+PROGRESS_KEY = 'tagprogress'
+HISTORY_KEY = 'taghistory'
# Global logger.
log = logging.getLogger('beets')
class ImportAbort(Exception):
"""Raised when the user aborts the tagging operation.
"""
@ -76,6 +79,7 @@ def _duplicate_check(lib, task):
found_albums.append(album_cand)
return found_albums
def _item_duplicate_check(lib, task):
"""Check whether an item already exists in the library. Returns a
list of Item objects.
@ -95,6 +99,7 @@ def _item_duplicate_check(lib, task):
found_items.append(other_item)
return found_items
def _infer_album_fields(task):
"""Given an album and an associated import task, massage the
album-level metadata. This ensures that the album artist is set
@ -108,8 +113,10 @@ def _infer_album_fields(task):
if task.choice_flag == action.ASIS:
# Taking metadata "as-is". Guess whether this album is VA.
plur_albumartist, freq = util.plurality(
-            [i.albumartist or i.artist for i in task.items])
-        if freq == len(task.items) or (freq > 1 and
+            [i.albumartist or i.artist for i in task.items]
+        )
+        if freq == len(task.items) or \
+                (freq > 1 and
float(freq) / len(task.items) >= SINGLE_ARTIST_THRESH):
# Single-artist album.
changes['albumartist'] = plur_albumartist
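util.plurality() returns the most common value and how often it occurs, and the threshold test above then decides whether the album looks like a single-artist release. A worked example with hypothetical per-item artists, using the SINGLE_ARTIST_THRESH of 0.25 defined near the top of this file:

    from beets import util

    artists = [u'Beatles', u'Beatles', u'Beatles', u'Various']  # hypothetical
    plur_albumartist, freq = util.plurality(artists)
    # plur_albumartist == u'Beatles', freq == 3

    # freq == len(items) fails (3 != 4), but freq > 1 and 3 / 4.0 = 0.75 is
    # >= 0.25, so the album is treated as single-artist and u'Beatles' becomes
    # the albumartist; all-distinct artists (freq == 1) would fall through to
    # the various-artists handling elsewhere in this function.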
@ -142,12 +149,14 @@ def _infer_album_fields(task):
for k, v in changes.iteritems():
setattr(item, k, v)
def _resume():
"""Check whether an import should resume and return a boolean or the
string 'ask' indicating that the user should be queried.
"""
return config['import']['resume'].as_choice([True, False, 'ask'])
def _open_state():
"""Reads the state file, returning a dictionary."""
try:
@ -155,6 +164,8 @@ def _open_state():
return pickle.load(f)
except (IOError, EOFError):
return {}
def _save_state(state):
"""Writes the state dictionary out to disk."""
try:
@ -166,7 +177,7 @@ def _save_state(state):
# Utilities for reading and writing the beets progress file, which
# allows long tagging tasks to be resumed when they pause (or crash).
-PROGRESS_KEY = 'tagprogress'
def progress_set(toppath, paths):
"""Record that tagging for the given `toppath` was successful up to
`paths`. If paths is None, then clear the progress value (indicating
@ -184,6 +195,8 @@ def progress_set(toppath, paths):
state[PROGRESS_KEY][toppath] = paths
_save_state(state)
def progress_get(toppath):
"""Get the last successfully tagged subpath of toppath. If toppath
has no progress information, returns None.
@ -197,7 +210,7 @@ def progress_get(toppath):
# Similarly, utilities for manipulating the "incremental" import log.
# This keeps track of all directories that were ever imported, which
# allows the importer to only import new stuff.
-HISTORY_KEY = 'taghistory'
def history_add(paths):
"""Indicate that the import of the album in `paths` is completed and
should not be repeated in incremental imports.
@ -209,6 +222,8 @@ def history_add(paths):
state[HISTORY_KEY].add(tuple(paths))
_save_state(state)
def history_get():
"""Get the set of completed path tuples in incremental imports.
"""
@ -456,7 +471,6 @@ class ImportTask(object):
if self.is_album and self.paths and not self.sentinel:
history_add(self.paths)
# Logical decisions.
def should_write_tags(self):
@ -474,7 +488,6 @@ class ImportTask(object):
"""
return self.sentinel or self.choice_flag == action.SKIP
# Convenient data.
def chosen_ident(self):
@ -512,7 +525,6 @@ class ImportTask(object):
else:
return [self.item]
# Utilities.
def prune(self, filename):
@ -622,6 +634,7 @@ def read_tasks(session):
log.info(u'Incremental import: skipped %i directories.' %
incremental_skipped)
def query_tasks(session):
"""A generator that works as a drop-in-replacement for read_tasks.
Instead of finding files from the filesystem, a query is used to
@ -640,6 +653,7 @@ def query_tasks(session):
items = list(album.items())
yield ImportTask(None, [album.item_dir()], items)
def initial_lookup(session):
"""A coroutine for performing the initial MusicBrainz lookup for an
album. It accepts lists of Items and yields
@ -659,6 +673,7 @@ def initial_lookup(session):
*autotag.tag_album(task.items)
)
def user_query(session):
"""A coroutine for interfacing with the user about the tagging
process.
@ -726,6 +741,7 @@ def user_query(session):
session.log_choice(task, True)
recent.add(ident)
def show_progress(session):
"""This stage replaces the initial_lookup and user_query stages
when the importer is run without autotagging. It displays the album
@ -743,6 +759,7 @@ def show_progress(session):
task.set_null_candidates()
task.set_choice(action.ASIS)
def apply_choices(session):
"""A coroutine for applying changes to albums and singletons during
the autotag process.
@ -830,6 +847,7 @@ def apply_choices(session):
for item in items:
session.lib.add(item)
def plugin_stage(session, func):
"""A coroutine (pipeline stage) that calls the given function with
each non-skipped import task. These stages occur between applying
@ -846,6 +864,7 @@ def plugin_stage(session, func):
for item in task.imported_items():
item.load()
def manipulate_files(session):
"""A coroutine (pipeline stage) that performs necessary file
manipulations *after* items have been added to the library.
@ -905,6 +924,7 @@ def manipulate_files(session):
# Plugin event.
plugins.send('import_task_files', session=session, task=task)
def finalize(session):
"""A coroutine that finishes up importer tasks. In particular, the
coroutine sends plugin events, deletes old files, and saves
@ -969,6 +989,7 @@ def item_lookup(session):
task.set_item_candidates(*autotag.tag_item(task.item))
def item_query(session):
"""A coroutine that queries the user for input on single-item
lookups.
@ -993,6 +1014,7 @@ def item_query(session):
session.log_choice(task, True)
recent.add(ident)
def item_progress(session):
"""Skips the lookup and query stages in a non-autotagged singleton
import. Just shows progress.
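Taken together, the coroutines touched in this file are the stages of the import pipeline. A hedged outline of the flow their docstrings describe; the actual wiring lives elsewhere in importer.py and is not part of this diff:

    album_import_stages = [
        'read_tasks',        # produce ImportTask objects from the filesystem
        'initial_lookup',    # fetch MusicBrainz candidates for each album
        'user_query',        # interactive choice (show_progress replaces the
                             # lookup/query stages when autotagging is off)
        'apply_choices',     # apply metadata and add items to the library
        'plugin_stage',      # per-plugin hooks between applying and file moves
        'manipulate_files',  # copy/move/write files after items are added
        'finalize',          # plugin events, deletions, progress bookkeeping
    ]
    # item_lookup, item_query and item_progress are the singleton counterparts.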

View file

@ -9,4 +9,4 @@ ignore=F401,E241
# List of files that have not been cleaned up yet. We will try to reduce
# this with each commit
-exclude=test/*,beetsplug/*,beets/autotag/hooks.py,beets/autotag/match.py,beets/autotag/mb.py,beets/importer.py,beets/library.py,beets/ui/commands.py,beets/util/functemplate.py
+exclude=test/*,beetsplug/*,beets/library.py,beets/ui/commands.py,beets/util/functemplate.py
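The exclude list shrinks so that the four autotag and importer modules removed from it are now checked by flake8, while the ignore line keeps suppressing F401 (imported but unused) and E241 (multiple spaces after comma). A quick local check of those files might look like the following sketch, assuming flake8 is installed and the command is run from the repository root:

    import subprocess

    subprocess.call([
        'flake8',
        'beets/autotag/hooks.py',
        'beets/autotag/match.py',
        'beets/autotag/mb.py',
        'beets/importer.py',
    ])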