Merge remote-tracking branch 'upstream/master' into sphinx-deps

Stefano Pigozzi 2023-03-08 13:43:22 +00:00 committed by GitHub
commit 87c43a0b88
42 changed files with 1153 additions and 689 deletions


@ -347,7 +347,6 @@ others. See `unittest.mock`_ for more info.
``mock.patch``, as they require manual cleanup. Use the annotation or
context manager forms instead.
.. _Python unittest: https://docs.python.org/2/library/unittest.html
.. _Codecov: https://codecov.io/github/beetbox/beets
.. _pytest-random: https://github.com/klrmn/pytest-random
.. _tox: https://tox.readthedocs.io/en/latest/
@ -358,10 +357,9 @@ others. See `unittest.mock`_ for more info.
.. _`https://github.com/beetbox/beets/blob/master/setup.py#L99`: https://github.com/beetbox/beets/blob/master/setup.py#L99
.. _test: https://github.com/beetbox/beets/tree/master/test
.. _`https://github.com/beetbox/beets/blob/master/test/test_template.py#L224`: https://github.com/beetbox/beets/blob/master/test/test_template.py#L224
.. _unittest: https://docs.python.org/3.8/library/unittest.html
.. _unittest: https://docs.python.org/3/library/unittest.html
.. _integration test: https://github.com/beetbox/beets/actions?query=workflow%3A%22integration+tests%22
.. _unittest.mock: https://docs.python.org/3/library/unittest.mock.html
.. _Python unittest: https://docs.python.org/2/library/unittest.html
.. _documentation: https://beets.readthedocs.io/en/stable/
.. _pip: https://pip.pypa.io/en/stable/
.. _vim: https://www.vim.org/


@ -14,7 +14,9 @@
"""Facilities for automatically determining files' correct metadata.
"""
from typing import Mapping
from beets.library import Item
from beets import logging
from beets import config
@ -71,7 +73,7 @@ SPECIAL_FIELDS = {
# Additional utilities for the main interface.
def apply_item_metadata(item, track_info):
def apply_item_metadata(item: Item, track_info: TrackInfo):
"""Set an item's metadata from its matched TrackInfo object.
"""
item.artist = track_info.artist
@ -95,7 +97,7 @@ def apply_item_metadata(item, track_info):
# and track number). Perhaps these should be emptied?
def apply_metadata(album_info, mapping):
def apply_metadata(album_info: AlbumInfo, mapping: Mapping[Item, TrackInfo]):
"""Set the items' metadata to match an AlbumInfo object using a
mapping from Items to TrackInfo objects.
"""


@ -14,13 +14,17 @@
"""Glue between metadata sources and the matching logic."""
from __future__ import annotations
from collections import namedtuple
from functools import total_ordering
import re
from typing import Dict, List, Tuple, Iterator, Union, Any, Optional,\
Iterable, Callable, cast
from beets import logging
from beets import plugins
from beets import config
from beets.library import Item
from beets.util import as_string
from beets.autotag import mb
from jellyfish import levenshtein_distance
@ -28,12 +32,6 @@ from unidecode import unidecode
log = logging.getLogger('beets')
# The name of the type for patterns in re changed in Python 3.7.
try:
Pattern = re._pattern_type
except AttributeError:
Pattern = re.Pattern
# Classes used to represent candidate options.
class AttrDict(dict):
@ -68,17 +66,45 @@ class AlbumInfo(AttrDict):
The others are optional and may be None.
"""
def __init__(self, tracks, album=None, album_id=None, artist=None,
artist_id=None, asin=None, albumtype=None, va=False,
year=None, month=None, day=None, label=None, mediums=None,
artist_sort=None, releasegroup_id=None, catalognum=None,
script=None, language=None, country=None, style=None,
genre=None, albumstatus=None, media=None, albumdisambig=None,
releasegroupdisambig=None, artist_credit=None,
original_year=None, original_month=None,
original_day=None, data_source=None, data_url=None,
discogs_albumid=None, discogs_labelid=None,
discogs_artistid=None, **kwargs):
# TYPING: are all of these correct? I've assumed optional strings
def __init__(
self,
tracks: List['TrackInfo'],
album: Optional[str] = None,
album_id: Optional[str] = None,
artist: Optional[str] = None,
artist_id: Optional[str] = None,
asin: Optional[str] = None,
albumtype: Optional[str] = None,
va: bool = False,
year: Optional[int] = None,
month: Optional[int] = None,
day: Optional[int] = None,
label: Optional[str] = None,
mediums: Optional[int] = None,
artist_sort: Optional[str] = None,
releasegroup_id: Optional[str] = None,
catalognum: Optional[str] = None,
script: Optional[str] = None,
language: Optional[str] = None,
country: Optional[str] = None,
style: Optional[str] = None,
genre: Optional[str] = None,
albumstatus: Optional[str] = None,
media: Optional[str] = None,
albumdisambig: Optional[str] = None,
releasegroupdisambig: Optional[str] = None,
artist_credit: Optional[str] = None,
original_year: Optional[int] = None,
original_month: Optional[int] = None,
original_day: Optional[int] = None,
data_source: Optional[str] = None,
data_url: Optional[str] = None,
discogs_albumid: Optional[str] = None,
discogs_labelid: Optional[str] = None,
discogs_artistid: Optional[str] = None,
**kwargs,
):
self.album = album
self.album_id = album_id
self.artist = artist
@ -118,7 +144,7 @@ class AlbumInfo(AttrDict):
# Work around a bug in python-musicbrainz-ngs that causes some
# strings to be bytes rather than Unicode.
# https://github.com/alastair/python-musicbrainz-ngs/issues/85
def decode(self, codec='utf-8'):
def decode(self, codec: str = 'utf-8'):
"""Ensure that all string attributes on this object, and the
constituent `TrackInfo` objects, are decoded to Unicode.
"""
@ -135,7 +161,7 @@ class AlbumInfo(AttrDict):
for track in self.tracks:
track.decode(codec)
def copy(self):
def copy(self) -> 'AlbumInfo':
dupe = AlbumInfo([])
dupe.update(self)
dupe.tracks = [track.copy() for track in self.tracks]
@ -154,15 +180,38 @@ class TrackInfo(AttrDict):
are all 1-based.
"""
def __init__(self, title=None, track_id=None, release_track_id=None,
artist=None, artist_id=None, length=None, index=None,
medium=None, medium_index=None, medium_total=None,
artist_sort=None, disctitle=None, artist_credit=None,
data_source=None, data_url=None, media=None, lyricist=None,
composer=None, composer_sort=None, arranger=None,
track_alt=None, work=None, mb_workid=None,
work_disambig=None, bpm=None, initial_key=None, genre=None,
**kwargs):
# TYPING: are all of these correct? I've assumed optional strings
def __init__(
self,
title: Optional[str] = None,
track_id: Optional[str] = None,
release_track_id: Optional[str] = None,
artist: Optional[str] = None,
artist_id: Optional[str] = None,
length: Optional[float] = None,
index: Optional[int] = None,
medium: Optional[int] = None,
medium_index: Optional[int] = None,
medium_total: Optional[int] = None,
artist_sort: Optional[str] = None,
disctitle: Optional[str] = None,
artist_credit: Optional[str] = None,
data_source: Optional[str] = None,
data_url: Optional[str] = None,
media: Optional[str] = None,
lyricist: Optional[str] = None,
composer: Optional[str] = None,
composer_sort: Optional[str] = None,
arranger: Optional[str] = None,
track_alt: Optional[str] = None,
work: Optional[str] = None,
mb_workid: Optional[str] = None,
work_disambig: Optional[str] = None,
bpm: Optional[str] = None,
initial_key: Optional[str] = None,
genre: Optional[str] = None,
**kwargs,
):
self.title = title
self.track_id = track_id
self.release_track_id = release_track_id
@ -203,7 +252,7 @@ class TrackInfo(AttrDict):
if isinstance(value, bytes):
setattr(self, fld, value.decode(codec, 'ignore'))
def copy(self):
def copy(self) -> 'TrackInfo':
dupe = TrackInfo()
dupe.update(self)
return dupe
@ -229,7 +278,7 @@ SD_REPLACE = [
]
def _string_dist_basic(str1, str2):
def _string_dist_basic(str1: str, str2: str) -> float:
"""Basic edit distance between two strings, ignoring
non-alphanumeric characters and case. Comparisons are based on a
transliteration/lowering to ASCII characters. Normalized by string
@ -246,7 +295,7 @@ def _string_dist_basic(str1, str2):
return levenshtein_distance(str1, str2) / float(max(len(str1), len(str2)))
def string_dist(str1, str2):
def string_dist(str1: Optional[str], str2: Optional[str]) -> float:
"""Gives an "intuitive" edit distance between two strings. This is
an edit distance, normalized by the string length, with a number of
tweaks that reflect intuition about text.
@ -330,9 +379,10 @@ class Distance:
def __init__(self):
self._penalties = {}
self.tracks: Dict[TrackInfo, Distance] = {}
@LazyClassProperty
def _weights(cls): # noqa: N805
def _weights(cls) -> Dict[str, float]: # noqa: N805
"""A dictionary from keys to floating-point weights.
"""
weights_view = config['match']['distance_weights']
@ -344,7 +394,7 @@ class Distance:
# Access the components and their aggregates.
@property
def distance(self):
def distance(self) -> float:
"""Return a weighted and normalized distance across all
penalties.
"""
@ -354,7 +404,7 @@ class Distance:
return 0.0
@property
def max_distance(self):
def max_distance(self) -> float:
"""Return the maximum distance penalty (normalization factor).
"""
dist_max = 0.0
@ -363,7 +413,7 @@ class Distance:
return dist_max
@property
def raw_distance(self):
def raw_distance(self) -> float:
"""Return the raw (denormalized) distance.
"""
dist_raw = 0.0
@ -371,7 +421,7 @@ class Distance:
dist_raw += sum(penalty) * self._weights[key]
return dist_raw
def items(self):
def items(self) -> List[Tuple[str, float]]:
"""Return a list of (key, dist) pairs, with `dist` being the
weighted distance, sorted from highest to lowest. Does not
include penalties with a zero value.
@ -389,32 +439,32 @@ class Distance:
key=lambda key_and_dist: (-key_and_dist[1], key_and_dist[0])
)
def __hash__(self):
def __hash__(self) -> int:
return id(self)
def __eq__(self, other):
def __eq__(self, other) -> bool:
return self.distance == other
# Behave like a float.
def __lt__(self, other):
def __lt__(self, other) -> bool:
return self.distance < other
def __float__(self):
def __float__(self) -> float:
return self.distance
def __sub__(self, other):
def __sub__(self, other) -> float:
return self.distance - other
def __rsub__(self, other):
def __rsub__(self, other) -> float:
return other - self.distance
def __str__(self):
def __str__(self) -> str:
return f"{self.distance:.2f}"
# Behave like a dict.
def __getitem__(self, key):
def __getitem__(self, key) -> float:
"""Returns the weighted distance for a named penalty.
"""
dist = sum(self._penalties[key]) * self._weights[key]
@ -423,16 +473,16 @@ class Distance:
return dist / dist_max
return 0.0
def __iter__(self):
def __iter__(self) -> Iterator[Tuple[str, float]]:
return iter(self.items())
def __len__(self):
def __len__(self) -> int:
return len(self.items())
def keys(self):
def keys(self) -> List[str]:
return [key for key, _ in self.items()]
def update(self, dist):
def update(self, dist: 'Distance'):
"""Adds all the distance penalties from `dist`.
"""
if not isinstance(dist, Distance):
@ -444,16 +494,17 @@ class Distance:
# Adding components.
def _eq(self, value1, value2):
def _eq(self, value1: Union[re.Pattern, Any], value2: Any) -> bool:
"""Returns True if `value1` is equal to `value2`. `value1` may
be a compiled regular expression, in which case it will be
matched against `value2`.
"""
if isinstance(value1, Pattern):
if isinstance(value1, re.Pattern):
value2 = cast(str, value2)
return bool(value1.match(value2))
return value1 == value2
def add(self, key, dist):
def add(self, key: str, dist: float):
"""Adds a distance penalty. `key` must correspond with a
configured weight setting. `dist` must be a float between 0.0
and 1.0, and will be added to any existing distance penalties
@ -465,7 +516,12 @@ class Distance:
)
self._penalties.setdefault(key, []).append(dist)
def add_equality(self, key, value, options):
def add_equality(
self,
key: str,
value: Any,
options: Union[List[Any], Tuple[Any, ...], Any],
):
"""Adds a distance penalty of 1.0 if `value` doesn't match any
of the values in `options`. If an option is a compiled regular
expression, it will be considered equal if it matches against
@ -481,7 +537,7 @@ class Distance:
dist = 1.0
self.add(key, dist)
def add_expr(self, key, expr):
def add_expr(self, key: str, expr: bool):
"""Adds a distance penalty of 1.0 if `expr` evaluates to True,
or 0.0.
"""
@ -490,7 +546,7 @@ class Distance:
else:
self.add(key, 0.0)
def add_number(self, key, number1, number2):
def add_number(self, key: str, number1: int, number2: int):
"""Adds a distance penalty of 1.0 for each number of difference
between `number1` and `number2`, or 0.0 when there is no
difference. Use this when there is no upper limit on the
@ -503,7 +559,12 @@ class Distance:
else:
self.add(key, 0.0)
def add_priority(self, key, value, options):
def add_priority(
self,
key: str,
value: Any,
options: Union[List[Any], Tuple[Any, ...], Any],
):
"""Adds a distance penalty that corresponds to the position at
which `value` appears in `options`. A distance penalty of 0.0
for the first option, or 1.0 if there is no matching option. If
@ -521,7 +582,12 @@ class Distance:
dist = 1.0
self.add(key, dist)
def add_ratio(self, key, number1, number2):
def add_ratio(
self,
key: str,
number1: Union[int, float],
number2: Union[int, float],
):
"""Adds a distance penalty for `number1` as a ratio of `number2`.
`number1` is bound at 0 and `number2`.
"""
@ -532,7 +598,7 @@ class Distance:
dist = 0.0
self.add(key, dist)
def add_string(self, key, str1, str2):
def add_string(self, key: str, str1: Optional[str], str2: Optional[str]):
"""Adds a distance penalty based on the edit distance between
`str1` and `str2`.
"""
@ -550,7 +616,7 @@ TrackMatch = namedtuple('TrackMatch', ['distance', 'info'])
# Aggregation of sources.
def album_for_mbid(release_id):
def album_for_mbid(release_id: str) -> Optional[AlbumInfo]:
"""Get an AlbumInfo object for a MusicBrainz release ID. Return None
if the ID is not found.
"""
@ -561,9 +627,10 @@ def album_for_mbid(release_id):
return album
except mb.MusicBrainzAPIError as exc:
exc.log(log)
return None
def track_for_mbid(recording_id):
def track_for_mbid(recording_id: str) -> Optional[TrackInfo]:
"""Get a TrackInfo object for a MusicBrainz recording ID. Return None
if the ID is not found.
"""
@ -574,9 +641,10 @@ def track_for_mbid(recording_id):
return track
except mb.MusicBrainzAPIError as exc:
exc.log(log)
return None
def albums_for_id(album_id):
def albums_for_id(album_id: str) -> Iterable[AlbumInfo]:
"""Get a list of albums for an ID."""
a = album_for_mbid(album_id)
if a:
@ -587,7 +655,7 @@ def albums_for_id(album_id):
yield a
def tracks_for_id(track_id):
def tracks_for_id(track_id: str) -> Iterable[TrackInfo]:
"""Get a list of tracks for an ID."""
t = track_for_mbid(track_id)
if t:
@ -598,7 +666,7 @@ def tracks_for_id(track_id):
yield t
def invoke_mb(call_func, *args):
def invoke_mb(call_func: Callable, *args):
try:
return call_func(*args)
except mb.MusicBrainzAPIError as exc:
@ -607,7 +675,13 @@ def invoke_mb(call_func, *args):
@plugins.notify_info_yielded('albuminfo_received')
def album_candidates(items, artist, album, va_likely, extra_tags):
def album_candidates(
items: List[Item],
artist: str,
album: str,
va_likely: bool,
extra_tags: Dict,
) -> Iterable[Tuple]:
"""Search for album matches. ``items`` is a list of Item objects
that make up the album. ``artist`` and ``album`` are the respective
names (strings), which may be derived from the item list or may be
@ -633,7 +707,7 @@ def album_candidates(items, artist, album, va_likely, extra_tags):
@plugins.notify_info_yielded('trackinfo_received')
def item_candidates(item, artist, title):
def item_candidates(item: Item, artist: str, title: str) -> Iterable[Tuple]:
"""Search for item matches. ``item`` is the Item to be matched.
``artist`` and ``title`` are strings and either reflect the item or
are specified by the user.
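The hunk above also swaps the old `re._pattern_type` alias for `re.Pattern` in `Distance._eq`. A minimal standalone sketch of that check, with made-up values:

import re
from typing import Any, Union

def eq(value1: Union[re.Pattern, Any], value2: Any) -> bool:
    # Mirrors Distance._eq above: a compiled regular expression is
    # matched against the other value instead of compared directly.
    if isinstance(value1, re.Pattern):
        return bool(value1.match(str(value2)))
    return value1 == value2

print(eq(re.compile(r'(\d+x)?(CD)', re.I), '2xCD'))  # True
print(eq('Vinyl', 'CD'))                             # False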


@ -19,14 +19,29 @@ releases and tracks.
import datetime
import re
from typing import (
Any,
Dict,
Iterable,
List,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
cast,
)
from munkres import Munkres
from collections import namedtuple
from beets import logging
from beets import plugins
from beets import config
from beets.library import Item
from beets.util import plurality
from beets.autotag import hooks
from beets.autotag import hooks, TrackInfo, Distance, AlbumInfo, TrackMatch, \
AlbumMatch
from beets.util.enumeration import OrderedEnum
# Artist signals that indicate "various artists". These are used at the
@ -60,7 +75,9 @@ Proposal = namedtuple('Proposal', ('candidates', 'recommendation'))
# Primary matching functionality.
def current_metadata(items):
def current_metadata(
items: Iterable[Item],
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""Extract the likely current metadata for an album given a list of its
items. Return two dictionaries:
- The most common value for each field.
@ -85,7 +102,10 @@ def current_metadata(items):
return likelies, consensus
def assign_items(items, tracks):
def assign_items(
items: Sequence[Item],
tracks: Sequence[TrackInfo],
) -> Tuple[Dict[Item, TrackInfo], List[Item], List[TrackInfo]]:
"""Given a list of Items and a list of TrackInfo objects, find the
best mapping between them. Returns a mapping from Items to TrackInfo
objects, a set of extra Items, and a set of extra TrackInfo
@ -93,10 +113,10 @@ def assign_items(items, tracks):
of objects of the two types.
"""
# Construct the cost matrix.
costs = []
costs: List[List[Distance]] = []
for item in items:
row = []
for i, track in enumerate(tracks):
for track in tracks:
row.append(track_distance(item, track))
costs.append(row)
@ -114,14 +134,18 @@ def assign_items(items, tracks):
return mapping, extra_items, extra_tracks
def track_index_changed(item, track_info):
def track_index_changed(item: Item, track_info: TrackInfo) -> bool:
"""Returns True if the item and track info index is different. Tolerates
per disc and per release numbering.
"""
return item.track not in (track_info.medium_index, track_info.index)
def track_distance(item, track_info, incl_artist=False):
def track_distance(
item: Item,
track_info: TrackInfo,
incl_artist: bool = False,
) -> Distance:
"""Determines the significance of a track metadata change. Returns a
Distance object. `incl_artist` indicates that a distance component should
be included for the track artist (i.e., for various-artist releases).
@ -130,10 +154,18 @@ def track_distance(item, track_info, incl_artist=False):
# Length.
if track_info.length:
diff = abs(item.length - track_info.length) - \
config['match']['track_length_grace'].as_number()
dist.add_ratio('track_length', diff,
config['match']['track_length_max'].as_number())
item_length = cast(float, item.length)
track_length_grace = cast(
Union[float, int],
config['match']['track_length_grace'].as_number(),
)
track_length_max = cast(
Union[float, int],
config['match']['track_length_max'].as_number(),
)
diff = abs(item_length - track_info.length) - track_length_grace
dist.add_ratio('track_length', diff, track_length_max)
# Title.
dist.add_string('track_title', item.title, track_info.title)
@ -157,7 +189,11 @@ def track_distance(item, track_info, incl_artist=False):
return dist
def distance(items, album_info, mapping):
def distance(
items: Sequence[Item],
album_info: AlbumInfo,
mapping: Dict[Item, TrackInfo],
) -> Distance:
"""Determines how "significant" an album metadata change would be.
Returns a Distance object. `album_info` is an AlbumInfo object
reflecting the album to be compared. `items` is a sequence of all
@ -181,6 +217,7 @@ def distance(items, album_info, mapping):
if album_info.media:
# Preferred media options.
patterns = config['match']['preferred']['media'].as_str_seq()
patterns = cast(Sequence, patterns)
options = [re.compile(r'(\d+x)?(%s)' % pat, re.I) for pat in patterns]
if options:
dist.add_priority('media', album_info.media, options)
@ -217,6 +254,7 @@ def distance(items, album_info, mapping):
# Preferred countries.
patterns = config['match']['preferred']['countries'].as_str_seq()
patterns = cast(Sequence, patterns)
options = [re.compile(pat, re.I) for pat in patterns]
if album_info.country and options:
dist.add_priority('country', album_info.country, options)
@ -250,11 +288,11 @@ def distance(items, album_info, mapping):
dist.add('tracks', dist.tracks[track].distance)
# Missing tracks.
for i in range(len(album_info.tracks) - len(mapping)):
for _ in range(len(album_info.tracks) - len(mapping)):
dist.add('missing_tracks', 1.0)
# Unmatched tracks.
for i in range(len(items) - len(mapping)):
for _ in range(len(items) - len(mapping)):
dist.add('unmatched_tracks', 1.0)
# Plugins.
@ -263,7 +301,7 @@ def distance(items, album_info, mapping):
return dist
def match_by_id(items):
def match_by_id(items: Iterable[Item]):
"""If the items are tagged with a MusicBrainz album ID, returns an
AlbumInfo object for the corresponding album. Otherwise, returns
None.
@ -287,7 +325,9 @@ def match_by_id(items):
return hooks.album_for_mbid(first)
def _recommendation(results):
def _recommendation(
results: Sequence[Union[AlbumMatch, TrackMatch]],
) -> Recommendation:
"""Given a sorted list of AlbumMatch or TrackMatch objects, return a
recommendation based on the results' distances.
@ -338,12 +378,19 @@ def _recommendation(results):
return rec
def _sort_candidates(candidates):
AnyMatch = TypeVar("AnyMatch", TrackMatch, AlbumMatch)
def _sort_candidates(candidates: Iterable[AnyMatch]) -> Sequence[AnyMatch]:
"""Sort candidates by distance."""
return sorted(candidates, key=lambda match: match.distance)
def _add_candidate(items, results, info):
def _add_candidate(
items: Sequence[Item],
results: Dict[Any, AlbumMatch],
info: AlbumInfo,
):
"""Given a candidate AlbumInfo object, attempt to add the candidate
to the output dictionary of AlbumMatch objects. This involves
checking the track count, ordering the items, checking for
@ -363,7 +410,7 @@ def _add_candidate(items, results, info):
return
# Discard matches without required tags.
for req_tag in config['match']['required'].as_str_seq():
for req_tag in cast(Sequence, config['match']['required'].as_str_seq()):
if getattr(info, req_tag) is None:
log.debug('Ignored. Missing required tag: {0}', req_tag)
return
@ -376,7 +423,8 @@ def _add_candidate(items, results, info):
# Skip matches with ignored penalties.
penalties = [key for key, _ in dist]
for penalty in config['match']['ignored'].as_str_seq():
ignored = cast(Sequence[str], config['match']['ignored'].as_str_seq())
for penalty in ignored:
if penalty in penalties:
log.debug('Ignored. Penalty: {0}', penalty)
return
@ -386,8 +434,12 @@ def _add_candidate(items, results, info):
extra_items, extra_tracks)
def tag_album(items, search_artist=None, search_album=None,
search_ids=[]):
def tag_album(
items,
search_artist: Optional[str] = None,
search_album: Optional[str] = None,
search_ids: List = [],
) -> Tuple[str, str, Proposal]:
"""Return a tuple of the current artist name, the current album
name, and a `Proposal` containing `AlbumMatch` candidates.
@ -407,20 +459,19 @@ def tag_album(items, search_artist=None, search_album=None,
"""
# Get current metadata.
likelies, consensus = current_metadata(items)
cur_artist = likelies['artist']
cur_album = likelies['album']
cur_artist = cast(str, likelies['artist'])
cur_album = cast(str, likelies['album'])
log.debug('Tagging {0} - {1}', cur_artist, cur_album)
# The output result (distance, AlbumInfo) tuples (keyed by MB album
# ID).
candidates = {}
# The output result, keys are the MB album ID.
candidates: Dict[Any, AlbumMatch] = {}
# Search by explicit ID.
if search_ids:
for search_id in search_ids:
log.debug('Searching for album ID: {0}', search_id)
for id_candidate in hooks.albums_for_id(search_id):
_add_candidate(items, candidates, id_candidate)
for album_info_for_id in hooks.albums_for_id(search_id):
_add_candidate(items, candidates, album_info_for_id)
# Use existing metadata or text search.
else:
@ -467,13 +518,17 @@ def tag_album(items, search_artist=None, search_album=None,
log.debug('Evaluating {0} candidates.', len(candidates))
# Sort and get the recommendation.
candidates = _sort_candidates(candidates.values())
rec = _recommendation(candidates)
return cur_artist, cur_album, Proposal(candidates, rec)
candidates_sorted = _sort_candidates(candidates.values())
rec = _recommendation(candidates_sorted)
return cur_artist, cur_album, Proposal(candidates_sorted, rec)
def tag_item(item, search_artist=None, search_title=None,
search_ids=[]):
def tag_item(
item,
search_artist: Optional[str] = None,
search_title: Optional[str] = None,
search_ids: List = [],
) -> Proposal:
"""Find metadata for a single track. Return a `Proposal` consisting
of `TrackMatch` objects.
@ -485,6 +540,7 @@ def tag_item(item, search_artist=None, search_title=None,
# Holds candidates found so far: keys are MBIDs; values are
# (distance, TrackInfo) pairs.
candidates = {}
rec: Optional[Recommendation] = None
# First, try matching by MusicBrainz ID.
trackids = search_ids or [t for t in [item.mb_trackid] if t]
@ -505,6 +561,7 @@ def tag_item(item, search_artist=None, search_title=None,
# If we're searching by ID, don't proceed.
if search_ids:
if candidates:
assert rec is not None
return Proposal(_sort_candidates(candidates.values()), rec)
else:
return Proposal([], Recommendation.none)
@ -521,6 +578,6 @@ def tag_item(item, search_artist=None, search_title=None,
# Sort by distance and return with recommendation.
log.debug('Found {0} candidates.', len(candidates))
candidates = _sort_candidates(candidates.values())
rec = _recommendation(candidates)
return Proposal(candidates, rec)
candidates_sorted = _sort_candidates(candidates.values())
rec = _recommendation(candidates_sorted)
return Proposal(candidates_sorted, rec)
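The cost matrix built in `assign_items` above is handed to the Hungarian algorithm via the `munkres` package. A standalone sketch of that assignment step, with made-up distances:

from munkres import Munkres

# Hypothetical pairwise distances: rows are items, columns are tracks.
costs = [
    [0.1, 0.9, 0.8],
    [0.7, 0.2, 0.6],
    [0.9, 0.8, 0.1],
]
# The algorithm picks the pairing with the lowest total cost.
print(Munkres().compute(costs))  # [(0, 0), (1, 1), (2, 2)]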


@ -14,6 +14,8 @@
"""Searches for albums in the MusicBrainz database.
"""
from __future__ import annotations
from typing import Any, List, Sequence, Tuple, Dict, Optional, Iterator, cast
import musicbrainzngs
import re
@ -82,11 +84,11 @@ if 'genres' in musicbrainzngs.VALID_INCLUDES['recording']:
RELEASE_INCLUDES += ['genres']
def track_url(trackid):
def track_url(trackid: str) -> str:
return urljoin(BASE_URL, 'recording/' + trackid)
def album_url(albumid):
def album_url(albumid: str) -> str:
return urljoin(BASE_URL, 'release/' + albumid)
@ -106,7 +108,7 @@ def configure():
)
def _preferred_alias(aliases):
def _preferred_alias(aliases: List):
"""Given an list of alias structures for an artist credit, select
and return the user's preferred alias alias or None if no matching
alias is found.
@ -138,12 +140,13 @@ def _preferred_alias(aliases):
return matches[0]
def _preferred_release_event(release):
def _preferred_release_event(release: Dict[str, Any]) -> Tuple[str, str]:
"""Given a release, select and return the user's preferred release
event as a tuple of (country, release_date). Fall back to the
default release event if a preferred event is not found.
"""
countries = config['match']['preferred']['countries'].as_str_seq()
countries = cast(Sequence, countries)
for country in countries:
for event in release.get('release-event-list', {}):
@ -153,10 +156,13 @@ def _preferred_release_event(release):
except KeyError:
pass
return release.get('country'), release.get('date')
return (
cast(str, release.get('country')),
cast(str, release.get('date'))
)
def _flatten_artist_credit(credit):
def _flatten_artist_credit(credit: List[Dict]) -> Tuple[str, str, str]:
"""Given a list representing an ``artist-credit`` block, flatten the
data into a triple of joined artist name strings: canonical, sort, and
credit.
@ -215,8 +221,13 @@ def _get_related_artist_names(relations, relation_type):
return ', '.join(related_artists)
def track_info(recording, index=None, medium=None, medium_index=None,
medium_total=None):
def track_info(
recording: Dict,
index: Optional[int] = None,
medium: Optional[int] = None,
medium_index: Optional[int] = None,
medium_total: Optional[int] = None,
) -> beets.autotag.hooks.TrackInfo:
"""Translates a MusicBrainz recording result dictionary into a beets
``TrackInfo`` object. Three parameters are optional and are used
only for tracks that appear on releases (non-singletons): ``index``,
@ -251,7 +262,7 @@ def track_info(recording, index=None, medium=None, medium_index=None,
)
if recording.get('length'):
info.length = int(recording['length']) / (1000.0)
info.length = int(recording['length']) / 1000.0
info.trackdisambig = recording.get('disambiguation')
@ -303,7 +314,11 @@ def track_info(recording, index=None, medium=None, medium_index=None,
return info
def _set_date_str(info, date_str, original=False):
def _set_date_str(
info: beets.autotag.hooks.AlbumInfo,
date_str: str,
original: bool = False,
):
"""Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo
object, set the object's release date fields appropriately. If
`original`, then set the original_year, etc., fields.
@ -323,7 +338,7 @@ def _set_date_str(info, date_str, original=False):
setattr(info, key, date_num)
def album_info(release):
def album_info(release: Dict) -> beets.autotag.hooks.AlbumInfo:
"""Takes a MusicBrainz release result dictionary and returns a beets
AlbumInfo object containing the interesting data about that release.
"""
@ -451,7 +466,7 @@ def album_info(release):
if release['release-group']['secondary-type-list']:
for sec_type in release['release-group']['secondary-type-list']:
albumtypes.append(sec_type.lower())
info.albumtypes = '; '.join(albumtypes)
info.albumtypes = albumtypes
# Release events.
info.country, release_date = _preferred_release_event(release)
@ -487,12 +502,14 @@ def album_info(release):
release['release-group'].get('genre-list', []),
release.get('genre-list', []),
]
genres = Counter()
genres: Counter[str] = Counter()
for source in sources:
for genreitem in source:
genres[genreitem['name']] += int(genreitem['count'])
info.genre = '; '.join(g[0] for g in sorted(genres.items(),
key=lambda g: -g[1]))
info.genre = '; '.join(
genre for genre, _count
in sorted(genres.items(), key=lambda g: -g[1])
)
extra_albumdatas = plugins.send('mb_album_extract', data=release)
for extra_albumdata in extra_albumdatas:
@ -502,7 +519,12 @@ def album_info(release):
return info
def match_album(artist, album, tracks=None, extra_tags=None):
def match_album(
artist: str,
album: str,
tracks: Optional[int] = None,
extra_tags: Optional[Dict[str, Any]] = None,
) -> Iterator[beets.autotag.hooks.AlbumInfo]:
"""Searches for a single album ("release" in MusicBrainz parlance)
and returns an iterator over AlbumInfo objects. May raise a
MusicBrainzAPIError.
@ -522,9 +544,9 @@ def match_album(artist, album, tracks=None, extra_tags=None):
# Additional search cues from existing metadata.
if extra_tags:
for tag in extra_tags:
for tag, value in extra_tags.items():
key = FIELDS_TO_MB_KEYS[tag]
value = str(extra_tags.get(tag, '')).lower().strip()
value = str(value).lower().strip()
if key == 'catno':
value = value.replace(' ', '')
if value:
@ -549,7 +571,10 @@ def match_album(artist, album, tracks=None, extra_tags=None):
yield albuminfo
def match_track(artist, title):
def match_track(
artist: str,
title: str,
) -> Iterator[beets.autotag.hooks.TrackInfo]:
"""Searches for a single track and returns an iterable of TrackInfo
objects. May raise a MusicBrainzAPIError.
"""
@ -571,17 +596,18 @@ def match_track(artist, title):
yield track_info(recording)
def _parse_id(s):
def _parse_id(s: str) -> Optional[str]:
"""Search for a MusicBrainz ID in the given string and return it. If
no ID can be found, return None.
"""
# Find the first thing that looks like a UUID/MBID.
match = re.search('[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
if match:
return match.group()
if match is not None:
return match.group() if match else None
return None
def album_for_id(releaseid):
def album_for_id(releaseid: str) -> Optional[beets.autotag.hooks.AlbumInfo]:
"""Fetches an album by its MusicBrainz ID and returns an AlbumInfo
object or None if the album is not found. May raise a
MusicBrainzAPIError.
@ -590,7 +616,7 @@ def album_for_id(releaseid):
albumid = _parse_id(releaseid)
if not albumid:
log.debug('Invalid MBID ({0}).', releaseid)
return
return None
try:
res = musicbrainzngs.get_release_by_id(albumid,
RELEASE_INCLUDES)
@ -603,14 +629,14 @@ def album_for_id(releaseid):
return album_info(res['release'])
def track_for_id(releaseid):
def track_for_id(releaseid: str) -> Optional[beets.autotag.hooks.TrackInfo]:
"""Fetches a track by its MusicBrainz ID. Returns a TrackInfo object
or None if no track is found. May raise a MusicBrainzAPIError.
"""
trackid = _parse_id(releaseid)
if not trackid:
log.debug('Invalid MBID ({0}).', releaseid)
return
return None
try:
res = musicbrainzngs.get_recording_by_id(trackid, TRACK_INCLUDES)
except musicbrainzngs.ResponseError:
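A quick standalone check of the MBID extraction in `_parse_id` above; the regex is copied from the hunk and the example UUID is made up:

import re
from typing import Optional

def parse_id(s: str) -> Optional[str]:
    # The first UUID-looking substring wins; anything else yields None.
    match = re.search('[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}', s)
    return match.group() if match else None

print(parse_id('https://musicbrainz.org/release/a1b2c3d4-1234-5678-9abc-def012345678'))
print(parse_id('not an id'))  # None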


@ -526,19 +526,6 @@ class FalseQuery(Query):
# Time/date queries.
def _to_epoch_time(date):
"""Convert a `datetime` object to an integer number of seconds since
the (local) Unix epoch.
"""
if hasattr(date, 'timestamp'):
# The `timestamp` method exists on Python 3.3+.
return int(date.timestamp())
else:
epoch = datetime.fromtimestamp(0)
delta = date - epoch
return int(delta.total_seconds())
def _parse_periods(pattern):
"""Parse a string containing two dates separated by two dots (..).
Return a pair of `Period` objects.
@ -724,13 +711,15 @@ class DateQuery(FieldQuery):
clause_parts = []
subvals = []
# Convert the `datetime` objects to an integer number of seconds since
# the (local) Unix epoch using `datetime.timestamp()`.
if self.interval.start:
clause_parts.append(self._clause_tmpl.format(self.field, ">="))
subvals.append(_to_epoch_time(self.interval.start))
subvals.append(int(self.interval.start.timestamp()))
if self.interval.end:
clause_parts.append(self._clause_tmpl.format(self.field, "<"))
subvals.append(_to_epoch_time(self.interval.end))
subvals.append(int(self.interval.end.timestamp()))
if clause_parts:
# One- or two-sided interval.
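The simplification above relies on `datetime.timestamp()`, which returns the number of seconds since the Unix epoch for a naive datetime interpreted in local time; a minimal check:

from datetime import datetime

dt = datetime(2023, 3, 8, 13, 43, 22)
print(int(dt.timestamp()))  # integer seconds since the (local) Unix epoch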


@ -208,6 +208,27 @@ class String(Type):
return self.model_type(value)
class DelimitedString(String):
"""A list of Unicode strings, represented in-database by a single string
containing delimiter-separated values.
"""
model_type = list
def __init__(self, delimiter):
self.delimiter = delimiter
def format(self, value):
return self.delimiter.join(value)
def parse(self, string):
if not string:
return []
return string.split(self.delimiter)
def to_sql(self, model_value):
return self.delimiter.join(model_value)
class Boolean(Type):
"""A boolean type.
"""
@ -231,3 +252,4 @@ FLOAT = Float()
NULL_FLOAT = NullFloat()
STRING = String()
BOOLEAN = Boolean()
SEMICOLON_SPACE_DSV = DelimitedString(delimiter='; ')
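A short round-trip sketch of the new `DelimitedString` type, assuming the class above is importable from `beets.dbcore.types`:

from beets.dbcore import types

dsv = types.DelimitedString(delimiter='; ')
print(dsv.parse('album; compilation'))  # ['album', 'compilation']
print(dsv.format(['album', 'remix']))   # album; remix
print(dsv.parse(''))                    # []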


@ -51,6 +51,8 @@ class PathQuery(dbcore.FieldQuery):
default, the behavior depends on the OS: case-insensitive on Windows
and case-sensitive otherwise.
"""
# For tests
force_implicit_query_detection = False
def __init__(self, field, pattern, fast=True, case_sensitive=None):
"""Create a path query.
@ -62,21 +64,27 @@ class PathQuery(dbcore.FieldQuery):
"""
super().__init__(field, pattern, fast)
path = util.normpath(pattern)
# By default, the case sensitivity depends on the filesystem
# that the query path is located on.
if case_sensitive is None:
path = util.bytestring_path(util.normpath(pattern))
case_sensitive = beets.util.case_sensitive(path)
case_sensitive = util.case_sensitive(path)
self.case_sensitive = case_sensitive
# Use a normalized-case pattern for case-insensitive matches.
if not case_sensitive:
pattern = pattern.lower()
# We need to lowercase the entire path, not just the pattern.
# In particular, on Windows, the drive letter is otherwise not
# lowercased.
# This also ensures that the `match()` method below and the SQL
# from `col_clause()` do the same thing.
path = path.lower()
# Match the path as a single file.
self.file_path = util.bytestring_path(util.normpath(pattern))
self.file_path = path
# As a directory (prefix).
self.dir_path = util.bytestring_path(os.path.join(self.file_path, b''))
self.dir_path = os.path.join(path, b'')
@classmethod
def is_path_query(cls, query_part):
@ -90,11 +98,13 @@ class PathQuery(dbcore.FieldQuery):
# Test both `sep` and `altsep` (i.e., both slash and backslash on
# Windows).
return (
(os.sep in query_part or
(os.altsep and os.altsep in query_part)) and
os.path.exists(syspath(normpath(query_part)))
)
if not (os.sep in query_part
or (os.altsep and os.altsep in query_part)):
return False
if cls.force_implicit_query_detection:
return True
return os.path.exists(syspath(normpath(query_part)))
def match(self, item):
path = item.path if self.case_sensitive else item.path.lower()
@ -300,34 +310,26 @@ class FileOperationError(Exception):
self.path = path
self.reason = reason
def text(self):
def __str__(self):
"""Get a string representing the error.
Describe both the underlying reason and the file path
in question.
Describe both the underlying reason and the file path in question.
"""
return '{}: {}'.format(
util.displayable_path(self.path),
str(self.reason)
)
# define __str__ as text to avoid infinite loop on super() calls
# with @six.python_2_unicode_compatible
__str__ = text
return f"{util.displayable_path(self.path)}: {self.reason}"
class ReadError(FileOperationError):
"""An error while reading a file (i.e. in `Item.read`)."""
def __str__(self):
return 'error reading ' + super().text()
return 'error reading ' + str(super())
class WriteError(FileOperationError):
"""An error while writing a file (i.e. in `Item.write`)."""
def __str__(self):
return 'error writing ' + super().text()
return 'error writing ' + str(super())
# Item and Album model classes.
@ -502,7 +504,7 @@ class Item(LibModel):
'mb_releasetrackid': types.STRING,
'trackdisambig': types.STRING,
'albumtype': types.STRING,
'albumtypes': types.STRING,
'albumtypes': types.SEMICOLON_SPACE_DSV,
'label': types.STRING,
'acoustid_fingerprint': types.STRING,
'acoustid_id': types.STRING,
@ -1062,7 +1064,7 @@ class Album(LibModel):
'mb_albumid': types.STRING,
'mb_albumartistid': types.STRING,
'albumtype': types.STRING,
'albumtypes': types.STRING,
'albumtypes': types.SEMICOLON_SPACE_DSV,
'label': types.STRING,
'mb_releasegroupid': types.STRING,
'asin': types.STRING,
@ -1355,9 +1357,12 @@ class Album(LibModel):
"""
# Get modified track fields.
track_updates = {}
for key in self.item_keys:
if key in self._dirty:
track_deletes = set()
for key in self._dirty:
if key in self.item_keys:
track_updates[key] = self[key]
elif key not in self:
track_deletes.add(key)
with self._db.transaction():
super().store(fields)
@ -1366,6 +1371,12 @@ class Album(LibModel):
for key, value in track_updates.items():
item[key] = value
item.store()
if track_deletes:
for item in self.items():
for key in track_deletes:
if key in item:
del item[key]
item.store()
def try_sync(self, write, move):
"""Synchronize the album and its items with the database.


@ -12,63 +12,51 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""A drop-in replacement for the standard-library `logging` module that
allows {}-style log formatting on Python 2 and 3.
"""A drop-in replacement for the standard-library `logging` module.
Provides everything the "logging" module does. The only difference is
that when getLogger(name) instantiates a logger that logger uses
{}-style formatting.
Provides everything the "logging" module does. In addition, beets' logger
(as obtained by `getLogger(name)`) supports thread-local levels, and messages
use {}-style formatting and can interpolate keyword arguments to the logging
calls (`debug`, `info`, etc).
"""
from copy import copy
import subprocess
import sys
import threading
import logging
def logsafe(val):
"""Coerce a potentially "problematic" value so it can be formatted
in a Unicode log string.
"""Coerce `bytes` to `str` to avoid crashes solely due to logging.
This works around a number of pitfalls when logging objects in
Python 2:
- Logging path names, which must be byte strings, requires
conversion for output.
- Some objects, including some exceptions, will crash when you call
`unicode(v)` while `str(v)` works fine. CalledProcessError is an
example.
This is particularly relevant for bytestring paths. Much of our code
explicitly uses `displayable_path` for them, but better be safe and prevent
any crashes that are solely due to log formatting.
"""
# Already Unicode.
if isinstance(val, str):
return val
# Bytestring: needs decoding.
elif isinstance(val, bytes):
# Bytestring: Needs decoding to be safe for substitution in format strings.
if isinstance(val, bytes):
# Blindly convert with UTF-8. Eventually, it would be nice to
# (a) only do this for paths, if they can be given a distinct
# type, and (b) warn the developer if they do this for other
# bytestrings.
return val.decode('utf-8', 'replace')
# A "problem" object: needs a workaround.
elif isinstance(val, subprocess.CalledProcessError):
try:
return str(val)
except UnicodeDecodeError:
# An object with a broken __unicode__ formatter. Use __str__
# instead.
return str(val).decode('utf-8', 'replace')
# Other objects are used as-is so field access, etc., still works in
# the format string.
else:
return val
# the format string. Relies on a working __str__ implementation.
return val
class StrFormatLogger(logging.Logger):
"""A version of `Logger` that uses `str.format`-style formatting
instead of %-style formatting.
instead of %-style formatting and supports keyword arguments.
We cannot easily get rid of this even in the Python 3 era: This custom
formatting supports substitution from `kwargs` into the message, which the
default `logging.Logger._log()` implementation does not.
Remark by @sampsyo: https://stackoverflow.com/a/24683360 might be a way to
achieve this with less code.
"""
class _LogMessage:
@ -82,10 +70,28 @@ class StrFormatLogger(logging.Logger):
kwargs = {k: logsafe(v) for (k, v) in self.kwargs.items()}
return self.msg.format(*args, **kwargs)
def _log(self, level, msg, args, exc_info=None, extra=None, **kwargs):
def _log(self, level, msg, args, exc_info=None, extra=None,
stack_info=False, **kwargs):
"""Log msg.format(*args, **kwargs)"""
m = self._LogMessage(msg, args, kwargs)
return super()._log(level, m, (), exc_info, extra)
stacklevel = kwargs.pop("stacklevel", 1)
if sys.version_info >= (3, 8):
stacklevel = {"stacklevel": stacklevel}
else:
# Simply ignore this when not supported by current Python version.
# Can be dropped when we remove support for Python 3.7.
stacklevel = {}
return super()._log(
level,
m,
(),
exc_info=exc_info,
extra=extra,
stack_info=stack_info,
**stacklevel,
)
class ThreadLocalLevelLogger(logging.Logger):
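A tiny standalone illustration of the deferred {}-style formatting that `_LogMessage` performs above; the message and arguments are made up:

class LogMessage:
    # Interpolation happens only when the record is actually rendered.
    def __init__(self, msg, args, kwargs):
        self.msg, self.args, self.kwargs = msg, args, kwargs

    def __str__(self):
        return self.msg.format(*self.args, **self.kwargs)

m = LogMessage('imported {0} ({reason})', ('track.mp3',), {'reason': 'new album'})
print(str(m))  # imported track.mp3 (new album)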


@ -790,9 +790,6 @@ def _store_dict(option, opt_str, value, parser):
setattr(parser.values, dest, {})
option_values = getattr(parser.values, dest)
# Decode the argument using the platform's argument encoding.
value = util.text_string(value, util.arg_encoding())
try:
key, value = value.split('=', 1)
if not (key and value):


@ -1778,7 +1778,7 @@ def config_func(lib, opts, args):
else:
config_out = config.dump(full=opts.defaults, redact=opts.redact)
if config_out.strip() != '{}':
print_(util.text_string(config_out))
print_(config_out)
else:
print("Empty configuration")
@ -1852,7 +1852,7 @@ def completion_script(commands):
"""
base_script = os.path.join(os.path.dirname(__file__), 'completion_base.sh')
with open(base_script) as base_script:
yield util.text_string(base_script.read())
yield base_script.read()
options = {}
aliases = {}


@ -532,10 +532,6 @@ def link(path, dest, replace=False):
raise FilesystemError('OS does not support symbolic links.'
'link', (path, dest), traceback.format_exc())
except OSError as exc:
# TODO: Windows version checks can be removed for python 3
if hasattr('sys', 'getwindowsversion'):
if sys.getwindowsversion()[0] < 6: # is before Vista
exc = 'OS does not support symbolic links.'
raise FilesystemError(exc, 'link', (path, dest),
traceback.format_exc())
@ -741,8 +737,7 @@ def legalize_path(path, replacements, length, extension, fragment):
def py3_path(path):
"""Convert a bytestring path to Unicode on Python 3 only. On Python
2, return the bytestring path unchanged.
"""Convert a bytestring path to Unicode.
This helps deal with APIs on Python 3 that *only* accept Unicode
(i.e., `str` objects). I philosophically disagree with this
@ -775,19 +770,6 @@ def as_string(value):
return str(value)
def text_string(value, encoding='utf-8'):
"""Convert a string, which can either be bytes or unicode, to
unicode.
Text (unicode) is left untouched; bytes are decoded. This is useful
to convert from a "native string" (bytes on Python 2, str on Python
3) to a consistently unicode value.
"""
if isinstance(value, bytes):
return value.decode(encoding)
return value
def plurality(objs):
"""Given a sequence of hashble objects, returns the object that
is most common in the set and the its number of appearance. The
@ -868,10 +850,7 @@ def command_output(cmd, shell=False):
"""
cmd = convert_command_args(cmd)
try: # python >= 3.3
devnull = subprocess.DEVNULL
except AttributeError:
devnull = open(os.devnull, 'r+b')
devnull = subprocess.DEVNULL
proc = subprocess.Popen(
cmd,
@ -957,61 +936,52 @@ def interactive_open(targets, command):
return os.execlp(*args)
def _windows_long_path_name(short_path):
"""Use Windows' `GetLongPathNameW` via ctypes to get the canonical,
long path given a short filename.
"""
if not isinstance(short_path, str):
short_path = short_path.decode(_fsencoding())
import ctypes
buf = ctypes.create_unicode_buffer(260)
get_long_path_name_w = ctypes.windll.kernel32.GetLongPathNameW
return_value = get_long_path_name_w(short_path, buf, 260)
if return_value == 0 or return_value > 260:
# An error occurred
return short_path
else:
long_path = buf.value
# GetLongPathNameW does not change the case of the drive
# letter.
if len(long_path) > 1 and long_path[1] == ':':
long_path = long_path[0].upper() + long_path[1:]
return long_path
def case_sensitive(path):
"""Check whether the filesystem at the given path is case sensitive.
To work best, the path should point to a file or a directory. If the path
does not exist, assume a case sensitive file system on every platform
except Windows.
Currently only used for absolute paths by beets; may have a trailing
path separator.
"""
# A fallback in case the path does not exist.
if not os.path.exists(syspath(path)):
# By default, the case sensitivity depends on the platform.
return platform.system() != 'Windows'
# Look at parent paths until we find a path that actually exists, or
# reach the root.
while True:
head, tail = os.path.split(path)
if head == path:
# We have reached the root of the file system.
# By default, the case sensitivity depends on the platform.
return platform.system() != 'Windows'
# If an upper-case version of the path exists but a lower-case
# version does not, then the filesystem must be case-sensitive.
# (Otherwise, we have more work to do.)
if not (os.path.exists(syspath(path.lower())) and
os.path.exists(syspath(path.upper()))):
return True
# Trailing path separator, or path does not exist.
if not tail or not os.path.exists(path):
path = head
continue
# Both versions of the path exist on the file system. Check whether
# they refer to different files by their inodes. Alas,
# `os.path.samefile` is only available on Unix systems on Python 2.
if platform.system() != 'Windows':
return not os.path.samefile(syspath(path.lower()),
syspath(path.upper()))
upper_tail = tail.upper()
lower_tail = tail.lower()
# On Windows, we check whether the canonical, long filenames for the
# files are the same.
lower = _windows_long_path_name(path.lower())
upper = _windows_long_path_name(path.upper())
return lower != upper
# In case we can't tell from the given path name, look at the
# parent directory.
if upper_tail == lower_tail:
path = head
continue
upper_sys = syspath(os.path.join(head, upper_tail))
lower_sys = syspath(os.path.join(head, lower_tail))
# If either the upper-cased or lower-cased path does not exist, the
# filesystem must be case-sensitive.
# (Otherwise, we have more work to do.)
if not os.path.exists(upper_sys) or not os.path.exists(lower_sys):
return True
# Original and both upper- and lower-cased versions of the path
# exist on the file system. Check whether they refer to different
# files by their inodes (or an alternative method on Windows).
return not os.path.samefile(lower_sys, upper_sys)
def raw_seconds_short(string):
@ -1054,8 +1024,7 @@ def asciify_path(path, sep_replace):
def par_map(transform, items):
"""Apply the function `transform` to all the elements in the
iterable `items`, like `map(transform, items)` but with no return
value. The map *might* happen in parallel: it's parallel on Python 3
and sequential on Python 2.
value.
The parallelism uses threads (not processes), so this is only useful
for IO-bound `transform`s.
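A simplified, `str`-path sketch of the case-sensitivity probe performed by the rewritten `case_sensitive` above (the real code walks bytestring paths through `syspath`):

import os
import platform

def case_sensitive(path: str) -> bool:
    while True:
        head, tail = os.path.split(path)
        if head == path:
            # Reached the filesystem root; fall back to a platform default.
            return platform.system() != 'Windows'
        if not tail or not os.path.exists(path):
            path = head
            continue
        if tail.upper() == tail.lower():
            # No case information in this component; try the parent.
            path = head
            continue
        upper_sys = os.path.join(head, tail.upper())
        lower_sys = os.path.join(head, tail.lower())
        if not (os.path.exists(upper_sys) and os.path.exists(lower_sys)):
            return True
        return not os.path.samefile(upper_sys, lower_sys)

print(case_sensitive(os.path.expanduser('~')))  # e.g. True on most Linux setups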


@ -530,18 +530,7 @@ def _parse(template):
return Expression(parts)
def cached(func):
"""Like the `functools.lru_cache` decorator, but works (as a no-op)
on Python < 3.2.
"""
if hasattr(functools, 'lru_cache'):
return functools.lru_cache(maxsize=128)(func)
else:
# Do nothing when lru_cache is not available.
return func
@cached
@functools.lru_cache(maxsize=128)
def template(fmt):
return Template(fmt)
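What the `functools.lru_cache` decorator above buys, shown with a stand-in parser rather than the real `Template`:

import functools

@functools.lru_cache(maxsize=128)
def template(fmt: str) -> tuple:
    print(f'parsing {fmt!r}')      # runs only on a cache miss
    return tuple(fmt.split('$'))

template('$albumartist/$album')    # parses
template('$albumartist/$album')    # served from the cache
print(template.cache_info().hits)  # 1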


@ -56,10 +56,13 @@ class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
self._log.warning("This plugin is deprecated.")
self.config.add({
'extractor': '',
'force': False,
'pretend': False
'pretend': False,
'base_url': ''
})
self.extractor = self.config['extractor'].as_str()
@ -79,7 +82,7 @@ class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin):
except OSError:
raise ui.UserError(
'No extractor command found: please install the extractor'
' binary from https://acousticbrainz.org/download'
' binary from https://essentia.upf.edu/'
)
except ABSubmitError:
# Extractor found, will exit with an error if not called with
@ -96,7 +99,15 @@ class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin):
self.extractor_sha.update(extractor.read())
self.extractor_sha = self.extractor_sha.hexdigest()
base_url = 'https://acousticbrainz.org/api/v1/{mbid}/low-level'
self.url = ''
base_url = self.config['base_url'].as_str()
if base_url:
if not base_url.startswith('http'):
raise ui.UserError('AcousticBrainz server base URL must start '
'with an HTTP scheme')
elif base_url[-1] != '/':
base_url = base_url + '/'
self.url = base_url + '{mbid}/low-level'
def commands(self):
cmd = ui.Subcommand(
@ -118,10 +129,17 @@ only files which would be processed'
return [cmd]
def command(self, lib, opts, args):
# Get items from arguments
items = lib.items(ui.decargs(args))
self.opts = opts
util.par_map(self.analyze_submit, items)
if not self.url:
raise ui.UserError(
'This plugin is deprecated since AcousticBrainz no longer '
'accepts new submissions. See the base_url configuration '
'option.'
)
else:
# Get items from arguments
items = lib.items(ui.decargs(args))
self.opts = opts
util.par_map(self.analyze_submit, items)
def analyze_submit(self, item):
analysis = self._get_analysis(item)
@ -179,7 +197,7 @@ only files which would be processed'
def _submit_data(self, item, data):
mbid = item['mb_trackid']
headers = {'Content-Type': 'application/json'}
response = requests.post(self.base_url.format(mbid=mbid),
response = requests.post(self.url.format(mbid=mbid),
json=data, headers=headers)
# Test that request was successful and raise an error on failure.
if response.status_code != 200:


@ -22,7 +22,6 @@ import requests
from beets import plugins, ui
from beets.dbcore import types
ACOUSTIC_BASE = "https://acousticbrainz.org/"
LEVELS = ["/low-level", "/high-level"]
ABSCHEME = {
'highlevel': {
@ -138,12 +137,23 @@ class AcousticPlugin(plugins.BeetsPlugin):
def __init__(self):
super().__init__()
self._log.warning("This plugin is deprecated.")
self.config.add({
'auto': True,
'force': False,
'tags': []
'tags': [],
'base_url': ''
})
self.base_url = self.config['base_url'].as_str()
if self.base_url:
if not self.base_url.startswith('http'):
raise ui.UserError('AcousticBrainz server base URL must start '
'with an HTTP scheme')
elif self.base_url[-1] != '/':
self.base_url = self.base_url + '/'
if self.config['auto']:
self.register_listener('import_task_files',
self.import_task_files)
@ -171,8 +181,13 @@ class AcousticPlugin(plugins.BeetsPlugin):
self._fetch_info(task.imported_items(), False, True)
def _get_data(self, mbid):
if not self.base_url:
raise ui.UserError(
'This plugin is deprecated since AcousticBrainz has shut '
'down. See the base_url configuration option.'
)
data = {}
for url in _generate_urls(mbid):
for url in _generate_urls(self.base_url, mbid):
self._log.debug('fetching URL: {}', url)
try:
@ -328,8 +343,8 @@ class AcousticPlugin(plugins.BeetsPlugin):
'because key {} was not found', subdata, v, k)
def _generate_urls(mbid):
def _generate_urls(base_url, mbid):
"""Generates AcousticBrainz end point urls for given `mbid`.
"""
for level in LEVELS:
yield ACOUSTIC_BASE + mbid + level
yield base_url + mbid + level


@ -55,7 +55,7 @@ class AlbumTypesPlugin(BeetsPlugin):
bracket_r = ''
res = ''
albumtypes = item.albumtypes.split('; ')
albumtypes = item.albumtypes
is_va = item.mb_albumartistid == VARIOUS_ARTISTS_ID
for type in types:
if type[0] in albumtypes and type[1]:


@ -180,6 +180,44 @@ class DiscogsPlugin(BeetsPlugin):
self._log.debug('Connection error in album search', exc_info=True)
return []
def item_candidates(self, item, artist, title):
"""Returns a list of TrackInfo objects for Search API results
matching ``title`` and ``artist``.
:param item: Singleton item to be matched.
:type item: beets.library.Item
:param artist: The artist of the track to be matched.
:type artist: str
:param title: The title of the track to be matched.
:type title: str
:return: Candidate TrackInfo objects.
:rtype: list[beets.autotag.hooks.TrackInfo]
"""
if not self.discogs_client:
return
if not artist and not title:
self._log.debug('Skipping Discogs query. File missing artist and '
'title tags.')
query = f'{artist} {title}'
try:
albums = self.get_albums(query)
except DiscogsAPIError as e:
self._log.debug('API Error: {0} (query: {1})', e, query)
if e.status_code == 401:
self.reset_auth()
return self.item_candidates(item, artist, title)
else:
return []
except CONNECTION_ERRORS:
self._log.debug('Connection error in track search', exc_info=True)
candidates = []
for album_cur in albums:
self._log.debug(u'searching within album {0}', album_cur.album)
candidates += album_cur.tracks
# first 10 results, don't overwhelm with options
return candidates[:10]
@staticmethod
def extract_release_id_regex(album_id):
"""Returns the Discogs_id or None."""
@ -351,9 +389,15 @@ class DiscogsPlugin(BeetsPlugin):
for track in tracks:
track.media = media
track.medium_total = mediums.count(track.medium)
if not track.artist: # get_track_info often fails to find artist
track.artist = artist
if not track.artist_id:
track.artist_id = artist_id
# Discogs does not have track IDs. Invent our own IDs as proposed
# in #2336.
track.track_id = str(album_id) + "-" + track.track_alt
track.data_url = data_url
track.data_source = 'Discogs'
# Retrieve master release id (returns None if there isn't one).
master_id = result.data.get('master_id')


@ -367,7 +367,7 @@ class CoverArtArchive(RemoteArtSource):
ID.
"""
def get_image_urls(url, size_suffix=None):
def get_image_urls(url, preferred_width=None):
try:
response = self.request(url)
except requests.RequestException:
@ -387,8 +387,8 @@ class CoverArtArchive(RemoteArtSource):
if 'Front' not in item['types']:
continue
if size_suffix:
yield item['thumbnails'][size_suffix]
if preferred_width:
yield item['thumbnails'][preferred_width]
else:
yield item['image']
except KeyError:
@ -401,12 +401,12 @@ class CoverArtArchive(RemoteArtSource):
# If the maxwidth config matches one of the already available sizes,
# fetch it directly instead of fetching the full-sized image and
# resizing it.
size_suffix = None
preferred_width = None
if plugin.maxwidth in self.VALID_THUMBNAIL_SIZES:
size_suffix = "-" + str(plugin.maxwidth)
preferred_width = str(plugin.maxwidth)
if 'release' in self.match_by and album.mb_albumid:
for url in get_image_urls(release_url, size_suffix):
for url in get_image_urls(release_url, preferred_width):
yield self._candidate(url=url, match=Candidate.MATCH_EXACT)
if 'releasegroup' in self.match_by and album.mb_releasegroupid:


@ -26,39 +26,20 @@ class CodingFormatter(string.Formatter):
"""A variant of `string.Formatter` that converts everything to `unicode`
strings.
This is necessary on Python 2, where formatting otherwise occurs on
bytestrings. It intercepts two points in the formatting process to decode
the format string and all fields using the specified encoding. If decoding
fails, the values are used as-is.
This was necessary on Python 2; it needs to be kept for backwards
compatibility.
"""
def __init__(self, coding):
"""Creates a new coding formatter with the provided coding."""
self._coding = coding
def format(self, format_string, *args, **kwargs):
"""Formats the provided string using the provided arguments and keyword
arguments.
This method decodes the format string using the formatter's coding.
See str.format and string.Formatter.format.
"""
if isinstance(format_string, bytes):
format_string = format_string.decode(self._coding)
return super().format(format_string, *args,
**kwargs)
def convert_field(self, value, conversion):
"""Converts the provided value given a conversion type.
This method decodes the converted value using the formatter's coding.
See string.Formatter.convert_field.
"""
converted = super().convert_field(value,
conversion)
converted = super().convert_field(value, conversion)
if isinstance(converted, bytes):
return converted.decode(self._coding)
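# For example, with a UTF-8 coding, formatting a bytes path such as
# b'/music/song.mp3' with '{path}' yields the str '/music/song.mp3'.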
@ -92,14 +73,13 @@ class HookPlugin(BeetsPlugin):
self._log.error('invalid command "{0}"', command)
return
# Use a string formatter that works on Unicode strings.
# For backwards compatibility, use a string formatter that decodes
# bytes (in particular, paths) to unicode strings.
formatter = CodingFormatter(arg_encoding())
command_pieces = shlex.split(command)
for i, piece in enumerate(command_pieces):
command_pieces[i] = formatter.format(piece, event=event,
**kwargs)
command_pieces = [
formatter.format(piece, event=event, **kwargs)
for piece in shlex.split(command)
]
self._log.debug('running command "{0}" for event {1}',
' '.join(command_pieces), event)

View file

@ -67,12 +67,6 @@ class KeyFinderPlugin(BeetsPlugin):
except (subprocess.CalledProcessError, OSError) as exc:
self._log.error('execution failed: {0}', exc)
continue
except UnicodeEncodeError:
# Workaround for Python 2 Windows bug.
# https://bugs.python.org/issue1759845
self._log.error('execution failed for Unicode path: {0!r}',
item.path)
continue
try:
key_raw = output.rsplit(None, 1)[-1]
@ -83,7 +77,7 @@ class KeyFinderPlugin(BeetsPlugin):
continue
try:
key = util.text_string(key_raw)
key = key_raw.decode("utf-8")
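# The KeyFinder output is assumed to be UTF-8 here; invalid bytes are
# reported as an error below and the item is skipped.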
except UnicodeDecodeError:
self._log.error('output is invalid UTF-8')
continue

View file

@ -6,9 +6,10 @@ like the following in your config.yaml to configure:
dir: 755
"""
import os
from beets import config, util
import stat
from beets import config
from beets.plugins import BeetsPlugin
from beets.util import ancestry
from beets.util import ancestry, displayable_path, syspath
def convert_perm(perm):
@ -25,7 +26,7 @@ def check_permissions(path, permission):
"""Check whether the file's permissions equal the given vector.
Return a boolean.
"""
return oct(os.stat(path).st_mode & 0o777) == oct(permission)
return oct(stat.S_IMODE(os.stat(syspath(path)).st_mode)) == oct(permission)
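# For example, a regular file with st_mode 0o100644 has S_IMODE(st_mode) ==
# 0o644, so check_permissions(path, 0o644) is True regardless of the
# file-type bits.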
def assert_permissions(path, permission, log):
@ -33,15 +34,12 @@ def assert_permissions(path, permission, log):
log a warning message. Return a boolean indicating the match, like
`check_permissions`.
"""
if not check_permissions(util.syspath(path), permission):
log.warning(
'could not set permissions on {}',
util.displayable_path(path),
)
if not check_permissions(path, permission):
log.warning('could not set permissions on {}', displayable_path(path))
log.debug(
'set permissions to {}, but permissions are now {}',
permission,
os.stat(util.syspath(path)).st_mode & 0o777,
os.stat(syspath(path)).st_mode & 0o777,
)
@ -101,9 +99,10 @@ class Permissions(BeetsPlugin):
# Changing permissions on the destination file.
self._log.debug(
'setting file permissions on {}',
util.displayable_path(path),
displayable_path(path),
)
os.chmod(util.syspath(path), file_perm)
if not check_permissions(path, file_perm):
os.chmod(syspath(path), file_perm)
# Checks if the destination path has the permissions configured.
assert_permissions(path, file_perm, self._log)
@ -113,9 +112,10 @@ class Permissions(BeetsPlugin):
# Changing permissions on the destination directory.
self._log.debug(
'setting directory permissions on {}',
util.displayable_path(path),
displayable_path(path),
)
os.chmod(util.syspath(path), dir_perm)
if not check_permissions(path, dir_perm):
os.chmod(syspath(path), dir_perm)
# Checks if the destination path has the permissions configured.
assert_permissions(path, dir_perm, self._log)

View file

@ -19,17 +19,12 @@
from beets.plugins import BeetsPlugin
from beets import ui
from beets.util import (mkdirall, normpath, sanitize_path, syspath,
bytestring_path, path_as_posix)
bytestring_path, path_as_posix, displayable_path)
from beets.library import Item, Album, parse_query_string
from beets.dbcore import OrQuery
from beets.dbcore.query import MultipleSort, ParsingError
import os
try:
from urllib.request import pathname2url
except ImportError:
# python2 is a bit different
from urllib import pathname2url
from urllib.request import pathname2url
class SmartPlaylistPlugin(BeetsPlugin):
@ -44,6 +39,7 @@ class SmartPlaylistPlugin(BeetsPlugin):
'forward_slash': False,
'prefix': '',
'urlencode': False,
'pretend_paths': False,
})
self.config['prefix'].redact = True # May contain username/password.
@ -59,6 +55,10 @@ class SmartPlaylistPlugin(BeetsPlugin):
help='update the smart playlists. Playlist names may be '
'passed as arguments.'
)
spl_update.parser.add_option(
'-p', '--pretend', action='store_true',
help="display query results but don't write playlist files."
)
spl_update.func = self.update_cmd
return [spl_update]
@ -84,7 +84,7 @@ class SmartPlaylistPlugin(BeetsPlugin):
else:
self._matched_playlists = self._unmatched_playlists
self.update_playlists(lib)
self.update_playlists(lib, opts.pretend)
def build_queries(self):
"""
@ -170,9 +170,13 @@ class SmartPlaylistPlugin(BeetsPlugin):
self._unmatched_playlists -= self._matched_playlists
def update_playlists(self, lib):
self._log.info("Updating {0} smart playlists...",
len(self._matched_playlists))
def update_playlists(self, lib, pretend=False):
if pretend:
self._log.info("Showing query results for {0} smart playlists...",
len(self._matched_playlists))
else:
self._log.info("Updating {0} smart playlists...",
len(self._matched_playlists))
playlist_dir = self.config['playlist_dir'].as_filename()
playlist_dir = bytestring_path(playlist_dir)
@ -185,7 +189,10 @@ class SmartPlaylistPlugin(BeetsPlugin):
for playlist in self._matched_playlists:
name, (query, q_sort), (album_query, a_q_sort) = playlist
self._log.debug("Creating playlist {0}", name)
if pretend:
self._log.info('Results for playlist {}:', name)
else:
self._log.debug("Creating playlist {0}", name)
items = []
if query:
@ -206,19 +213,29 @@ class SmartPlaylistPlugin(BeetsPlugin):
item_path = os.path.relpath(item.path, relative_to)
if item_path not in m3us[m3u_name]:
m3us[m3u_name].append(item_path)
if pretend and self.config['pretend_paths']:
print(displayable_path(item_path))
elif pretend:
print(item)
prefix = bytestring_path(self.config['prefix'].as_str())
# Write all of the accumulated track lists to files.
for m3u in m3us:
m3u_path = normpath(os.path.join(playlist_dir,
bytestring_path(m3u)))
mkdirall(m3u_path)
with open(syspath(m3u_path), 'wb') as f:
for path in m3us[m3u]:
if self.config['forward_slash'].get():
path = path_as_posix(path)
if self.config['urlencode']:
path = bytestring_path(pathname2url(path))
f.write(prefix + path + b'\n')
if not pretend:
prefix = bytestring_path(self.config['prefix'].as_str())
# Write all of the accumulated track lists to files.
for m3u in m3us:
m3u_path = normpath(os.path.join(playlist_dir,
bytestring_path(m3u)))
mkdirall(m3u_path)
with open(syspath(m3u_path), 'wb') as f:
for path in m3us[m3u]:
if self.config['forward_slash'].get():
path = path_as_posix(path)
if self.config['urlencode']:
path = bytestring_path(pathname2url(path))
f.write(prefix + path + b'\n')
self._log.info("{0} playlists updated", len(self._matched_playlists))
if pretend:
self._log.info("Displayed results for {0} playlists",
len(self._matched_playlists))
else:
self._log.info("{0} playlists updated",
len(self._matched_playlists))

View file

@ -298,7 +298,7 @@ class SpotifyPlugin(MetadataSourcePlugin, BeetsPlugin):
# Get album information for spotify tracks
try:
album = track_data['album']['name']
except KeyError:
except (KeyError, TypeError):
album = None
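# track_data['album'] may be missing (KeyError) or None (TypeError when
# subscripted); both cases are treated as "no album information".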
return TrackInfo(
title=track_data['name'],

View file

@ -307,19 +307,24 @@ def item_file(item_id):
else:
item_path = util.py3_path(item.path)
try:
unicode_item_path = util.text_string(item.path)
except (UnicodeDecodeError, UnicodeEncodeError):
unicode_item_path = util.displayable_path(item.path)
base_filename = os.path.basename(item_path)
# FIXME: Arguably, this should just use `displayable_path`: The latter
# tries `_fsencoding()` first, but then falls back to `utf-8`, too.
if isinstance(base_filename, bytes):
try:
unicode_base_filename = base_filename.decode("utf-8")
except UnicodeError:
unicode_base_filename = util.displayable_path(base_filename)
else:
unicode_base_filename = base_filename
base_filename = os.path.basename(unicode_item_path)
try:
# Imitate http.server behaviour
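# Filenames that cannot be encoded as Latin-1 (e.g. ones containing
# Cyrillic characters) are transliterated to ASCII via unidecode below.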
base_filename.encode("latin-1", "strict")
except UnicodeEncodeError:
except UnicodeError:
safe_filename = unidecode(base_filename)
else:
safe_filename = base_filename
safe_filename = unicode_base_filename
response = flask.send_file(
item_path,

docs/changelog.rst Executable file → Normal file
View file

@ -4,10 +4,15 @@ Changelog
1.6.1 (in development)
----------------------
Changelog goes here!
Changelog goes here! Please add your entry to the bottom of one of the lists below!
With this release, beets now requires Python 3.7 or later (it removes support
for Python 3.6).
New features:
* Added additional error handling for `spotify` plugin.
:bug:`4686`
* We now import the remixer field from MusicBrainz into the library.
:bug:`4428`
* :doc:`/plugins/mbsubmit`: Added a new `mbsubmit` command to print track information to be submitted to MusicBrainz after initial import.
@ -36,7 +41,8 @@ New features:
* Add :ref:`exact match <exact-match>` queries, using the prefixes ``=`` and
``=~``.
:bug:`4251`
* :doc:`/plugins/discogs`: Permit appending style to genre
* :doc:`/plugins/discogs`: Permit appending style to genre.
* :doc:`/plugins/discogs`: Implement ``item_candidates`` for matching singletons.
* :doc:`/plugins/convert`: Add a new `auto_keep` option that automatically
converts files but keeps the *originals* in the library.
:bug:`1840` :bug:`4302`
@ -49,18 +55,24 @@ New features:
:bug:`4438`
* Add a new ``import.ignored_alias_types`` config option to allow for
specific alias types to be skipped over when importing items/albums.
* :doc:`/plugins/smartplaylist`: A new ``--pretend`` option lets the user see
what a new or changed smart playlist saved in the config is actually
returning.
:bug:`4573`
* :doc:`/plugins/fromfilename`: Add debug log messages that inform when the
plugin replaced bad (missing) artist, title or tracknumber metadata.
:bug:`4561` :bug:`4600`
Bug fixes:
* :doc:`/plugins/fetchart`: Fix fetching from Cover Art Archive when the
`maxwidth` option is set to one of the supported Cover Art Archive widths.
* :doc:`/plugins/discogs`: Fix "Discogs plugin replacing Feat. or Ft. with
a comma" by fixing an oversight that removed a functionality from the code
base when the MetadataSourcePlugin abstract class was introduced in PR's
#3335 and #3371.
:bug:`4401`
* :doc:`/plugins/convert`: Set default ``max_bitrate`` value to ``None`` to
avoid transcoding when this parameter is not set. :bug:`4472`
* :doc:`/plugins/replaygain`: Avoid a crash when errors occur in the analysis
backend.
@ -133,9 +145,19 @@ Bug fixes:
* :doc:`/plugins/fromfilename`: Fix failed detection of <track> <title>
filename patterns.
:bug:`4561` :bug:`4600`
* Fix issue where deletion of flexible fields on an album doesn't cascade to its
items.
:bug:`4662`
* Fix issue where ``beet write`` continuously retags the ``albumtypes`` metadata
field in files. Additionally, broken data could have been added to the library
when the tag was read from the file back into the library using ``beet update``.
All users are required to **check whether such broken data is present in the
library**. Following the instructions `described here
<https://github.com/beetbox/beets/pull/4582#issuecomment-1445023493>`_, a
sanity check and potential fix are straightforward. :bug:`4528`
For packagers:
* As noted above, the minimum Python version is now 3.7.
* We fixed a version for the dependency on the `Confuse`_ library.
:bug:`4167`
* The minimum required version of :pypi:`mediafile` is now 0.9.0.
@ -143,6 +165,13 @@ For packagers:
Other changes:
* Add ``sphinx`` and ``sphinx_rtd_theme`` as dependencies for a new ``docs`` extra
:bug:`4643`
* :doc:`/plugins/absubmit`: Deprecate the ``absubmit`` plugin since
AcousticBrainz has stopped accepting new submissions.
:bug:`4627`
* :doc:`/plugins/acousticbrainz`: Deprecate the ``acousticbrainz`` plugin
since the AcousticBrainz project has shut down.
:bug:`4627`
* :doc:`/plugins/limit`: Limit query results to head or tail (``lslimit``
command only)
* :doc:`/plugins/fish`: Add ``--output`` option.

View file

@ -10,7 +10,7 @@ Installing
----------
You will need Python.
Beets works on Python 3.6 or later.
Beets works on Python 3.7 or later.
* **macOS** 11 (Big Sur) includes Python 3.8 out of the box.
You can opt for a more recent Python by installing it via `Homebrew`_
@ -94,7 +94,7 @@ Installing on Windows
Installing beets on Windows can be tricky. Following these steps might help you
get it right:
1. If you don't have it, `install Python`_ (you want at least Python 3.6). The
1. If you don't have it, `install Python`_ (you want at least Python 3.7). The
installer should give you the option to "add Python to PATH." Check this
box. If you do that, you can skip the next step.
@ -105,7 +105,7 @@ get it right:
should open the "System Properties" screen, then select the "Advanced" tab,
then hit the "Environmental Variables..." button, and then look for the PATH
variable in the table. Add the following to the end of the variable's value:
``;C:\Python36;C:\Python36\Scripts``. You may need to adjust these paths to
``;C:\Python37;C:\Python37\Scripts``. You may need to adjust these paths to
point to your Python installation.
3. Now install beets by running: ``pip install beets``
@ -298,9 +298,12 @@ You can always get help using the ``beet help`` command. The plain ``beet help``
command lists all the available commands; then, for example, ``beet help
import`` gives more specific help about the ``import`` command.
Please let me know what you think of beets via `the discussion board`_ or
`Twitter`_.
If you need more of a walkthrough, you can read an illustrated one `on the
beets blog <https://beets.io/blog/walkthrough.html>`_.
Please let us know what you think of beets via `the discussion board`_ or
`Mastodon`_.
.. _the mailing list: https://groups.google.com/group/beets-users
.. _the discussion board: https://discourse.beets.io
.. _twitter: https://twitter.com/b33ts
.. _the discussion board: https://github.com/beetbox/beets/discussions
.. _mastodon: https://fosstodon.org/@beets

View file

@ -1,8 +1,17 @@
AcousticBrainz Submit Plugin
============================
The ``absubmit`` plugin lets you submit acoustic analysis results to the
`AcousticBrainz`_ server.
The ``absubmit`` plugin lets you submit acoustic analysis results to an
`AcousticBrainz`_ server. This plugin is now deprecated since the
AcousticBrainz project has been shut down.
As an alternative, the `beets-xtractor`_ plugin can be used.
Warning
-------
The AcousticBrainz project has shut down. To use this plugin you must set the
``base_url`` configuration option to a server offering the AcousticBrainz API.
Installation
------------
@ -57,10 +66,14 @@ file. The available options are:
- **pretend**: Do not analyze and submit AcousticBrainz data; instead, print
out the items that would be processed.
Default: ``no``.
- **base_url**: The base URL of the AcousticBrainz server. The plugin has no
function if this option is not set.
Default: None
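For example, a minimal configuration pointing the plugin at a self-hosted
server might look like the following (the host name below is a placeholder,
not a real endpoint):

.. code-block:: yaml

    absubmit:
        base_url: "https://acousticbrainz.example.org"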
.. _streaming_extractor_music: https://acousticbrainz.org/download
.. _streaming_extractor_music: https://essentia.upf.edu/
.. _FAQ: https://acousticbrainz.org/faq
.. _pip: https://pip.pypa.io
.. _requests: https://requests.readthedocs.io/en/master/
.. _github: https://github.com/MTG/essentia
.. _AcousticBrainz: https://acousticbrainz.org
.. _beets-xtractor: https://github.com/adamjakab/BeetsPluginXtractor

View file

@ -2,9 +2,13 @@ AcousticBrainz Plugin
=====================
The ``acousticbrainz`` plugin gets acoustic-analysis information from the
`AcousticBrainz`_ project.
`AcousticBrainz`_ project. This plugin is now deprecated since the
AcousticBrainz project has been shut down.
As an alternative, the `beets-xtractor`_ plugin can be used.
.. _AcousticBrainz: https://acousticbrainz.org/
.. _beets-xtractor: https://github.com/adamjakab/BeetsPluginXtractor
Enable the ``acousticbrainz`` plugin in your configuration (see :ref:`using-plugins`) and run it by typing::
@ -44,6 +48,12 @@ these fields:
* ``tonal``
* ``voice_instrumental``
Warning
-------
The AcousticBrainz project has shut down. To use this plugin you must set the
``base_url`` configuration option to a server offering the AcousticBrainz API.
Automatic Tagging
-----------------
@ -56,7 +66,7 @@ Configuration
-------------
To configure the plugin, make a ``acousticbrainz:`` section in your
configuration file. There are three options:
configuration file. The available options are:
- **auto**: Enable AcousticBrainz during ``beet import``.
Default: ``yes``.
@ -64,4 +74,7 @@ configuration file. There are three options:
it.
Default: ``no``.
- **tags**: Which tags from the list above to set on your files.
Default: [] (all)
Default: [] (all).
- **base_url**: The base URL of the AcousticBrainz server. The plugin has no
function if this option is not set.
Default: None
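For example, a minimal configuration against a self-hosted server offering the
AcousticBrainz API might look like this (the URL is a placeholder, not a real
endpoint):

.. code-block:: yaml

    acousticbrainz:
        base_url: "https://acousticbrainz.example.org"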

View file

@ -11,6 +11,11 @@ you can use in your path formats or elsewhere.
.. _MusicBrainz documentation: https://musicbrainz.org/doc/Release_Group/Type
A bug introduced in beets 1.6.0 may have imported broken data into the
``albumtypes`` library field. Please follow the instructions `described
here <https://github.com/beetbox/beets/pull/4582#issuecomment-1445023493>`_ for
a sanity check and potential fix. :bug:`4528`
Configuration
-------------

View file

@ -2,8 +2,7 @@ Discogs Plugin
==============
The ``discogs`` plugin extends the autotagger's search capabilities to
include matches from the `Discogs`_ database when importing albums.
(The plugin does not yet support matching singleton tracks.)
include matches from the `Discogs`_ database.
.. _Discogs: https://discogs.com
@ -11,9 +10,11 @@ Installation
------------
To use the ``discogs`` plugin, first enable it in your configuration (see
:ref:`using-plugins`). Then, install the `python3-discogs-client`_ library by typing::
:ref:`using-plugins`). Then, install the `python3-discogs-client`_ library by typing:
pip install python3-discogs-client
.. code-block:: console
$ pip install python3-discogs-client
You will also need to register for a `Discogs`_ account, and provide
authentication credentials via a personal access token or an OAuth2
@ -39,11 +40,19 @@ Authentication via Personal Access Token
As an alternative to OAuth, you can get a token from Discogs and add it to
your configuration.
To get a personal access token (called a "user token" in the `python3-discogs-client`_
documentation), login to `Discogs`_, and visit the
`Developer settings page
<https://www.discogs.com/settings/developers>`_. Press the ``Generate new
token`` button, and place the generated token in your configuration, as the
``user_token`` config option in the ``discogs`` section.
documentation):
#. log in to `Discogs`_;
#. visit the `Developer settings page <https://www.discogs.com/settings/developers>`_;
#. press the *Generate new token* button;
#. copy the generated token;
#. place it in your configuration in the ``discogs`` section as the ``user_token`` option:
.. code-block:: yaml
discogs:
user_token: "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
Configuration
-------------
@ -54,22 +63,30 @@ There is one additional option in the ``discogs:`` section, ``index_tracks``.
Index tracks (see the `Discogs guidelines
<https://support.discogs.com/hc/en-us/articles/360005055373-Database-Guidelines-12-Tracklisting#Index_Tracks_And_Headings>`_),
along with headers, mark divisions between distinct works on the same release
or within works. When ``index_tracks`` is enabled::
or within works. When ``index_tracks`` is enabled:
.. code-block:: yaml
discogs:
index_tracks: yes
beets will incorporate the names of the divisions containing each track into
the imported track's title. For example, importing
the imported track's title.
For example, importing
`this album
<https://www.discogs.com/Handel-Sutherland-Kirkby-Kwella-Nelson-Watkinson-Bowman-Rolfe-Johnson-Elliott-Partridge-Thomas-The-A/release/2026070>`_
would result in track names like::
would result in track names like:
.. code-block:: text
Messiah, Part I: No.1: Sinfony
Messiah, Part II: No.22: Chorus- Behold The Lamb Of God
Athalia, Act I, Scene I: Sinfonia
whereas with ``index_tracks`` disabled you'd get::
whereas with ``index_tracks`` disabled you'd get:
.. code-block:: text
No.1: Sinfony
No.22: Chorus- Behold The Lamb Of God
@ -81,7 +98,7 @@ Other configurations available under ``discogs:`` are:
- **append_style_genre**: Appends the Discogs style (if found) to the genre tag. This can be useful if you want more granular genres to categorize your music.
For example, a release in Discogs might have a genre of "Electronic" and a style of "Techno": enabling this setting would set the genre to be "Electronic, Techno" (assuming default separator of ``", "``) instead of just "Electronic".
Default: ``false``
Default: ``False``
- **separator**: How to join multiple genre and style values from Discogs into a string.
Default: ``", "``
@ -100,8 +117,7 @@ Here are two things you can try:
* Make sure that your system clock is accurate. The Discogs servers can reject
your request if your clock is too out of sync.
The plugin can only match albums, so no Discogs matches will be
reported when importing singletons using ``-s``. One possible
workaround is to use the ``--group-albums`` option.
Matching tracks by Discogs ID is not yet supported. The ``--group-albums``
option in album import mode provides an alternative to singleton mode for autotagging tracks that are not in album-related folders.
.. _python3-discogs-client: https://github.com/joalla/discogs_client

View file

@ -132,13 +132,21 @@ following to your configuration::
Autotagger Extensions
---------------------
* :doc:`chroma`: Use acoustic fingerprinting to identify audio files with
missing or incorrect metadata.
* :doc:`discogs`: Search for releases in the `Discogs`_ database.
* :doc:`spotify`: Search for releases in the `Spotify`_ database.
* :doc:`deezer`: Search for releases in the `Deezer`_ database.
* :doc:`fromfilename`: Guess metadata for untagged tracks from their
filenames.
:doc:`chroma <chroma>`
Use acoustic fingerprinting to identify audio files with
missing or incorrect metadata.
:doc:`discogs <discogs>`
Search for releases in the `Discogs`_ database.
:doc:`spotify <spotify>`
Search for releases in the `Spotify`_ database.
:doc:`deezer <deezer>`
Search for releases in the `Deezer`_ database.
:doc:`fromfilename <fromfilename>`
Guess metadata for untagged tracks from their filenames.
.. _Discogs: https://www.discogs.com/
.. _Spotify: https://www.spotify.com
@ -147,30 +155,69 @@ Autotagger Extensions
Metadata
--------
* :doc:`absubmit`: Analyse audio with the `streaming_extractor_music`_ program and submit the metadata to the AcousticBrainz server
* :doc:`acousticbrainz`: Fetch various AcousticBrainz metadata
* :doc:`bpm`: Measure tempo using keystrokes.
* :doc:`bpsync`: Fetch updated metadata from Beatport.
* :doc:`edit`: Edit metadata from a text editor.
* :doc:`embedart`: Embed album art images into files' metadata.
* :doc:`fetchart`: Fetch album cover art from various sources.
* :doc:`ftintitle`: Move "featured" artists from the artist field to the title
field.
* :doc:`keyfinder`: Use the `KeyFinder`_ program to detect the musical
key from the audio.
* :doc:`importadded`: Use file modification times for guessing the value for
the `added` field in the database.
* :doc:`lastgenre`: Fetch genres based on Last.fm tags.
* :doc:`lastimport`: Collect play counts from Last.fm.
* :doc:`lyrics`: Automatically fetch song lyrics.
* :doc:`mbsync`: Fetch updated metadata from MusicBrainz.
* :doc:`metasync`: Fetch metadata from local or remote sources
* :doc:`mpdstats`: Connect to `MPD`_ and update the beets library with play
statistics (last_played, play_count, skip_count, rating).
* :doc:`parentwork`: Fetch work titles and works they are part of.
* :doc:`replaygain`: Calculate volume normalization for players that support it.
* :doc:`scrub`: Clean extraneous metadata from music files.
* :doc:`zero`: Nullify fields by pattern or unconditionally.
:doc:`absubmit <absubmit>`
Analyse audio with the `streaming_extractor_music`_ program and submit the metadata to an AcousticBrainz server
:doc:`acousticbrainz <acousticbrainz>`
Fetch various AcousticBrainz metadata
:doc:`bpm <bpm>`
Measure tempo using keystrokes.
:doc:`bpsync <bpsync>`
Fetch updated metadata from Beatport.
:doc:`edit <edit>`
Edit metadata from a text editor.
:doc:`embedart <embedart>`
Embed album art images into files' metadata.
:doc:`fetchart <fetchart>`
Fetch album cover art from various sources.
:doc:`ftintitle <ftintitle>`
Move "featured" artists from the artist field to the title
field.
:doc:`keyfinder <keyfinder>`
Use the `KeyFinder`_ program to detect the musical
key from the audio.
:doc:`importadded <importadded>`
Use file modification times for guessing the value for
the `added` field in the database.
:doc:`lastgenre <lastgenre>`
Fetch genres based on Last.fm tags.
:doc:`lastimport <lastimport>`
Collect play counts from Last.fm.
:doc:`lyrics <lyrics>`
Automatically fetch song lyrics.
:doc:`mbsync <mbsync>`
Fetch updated metadata from MusicBrainz.
:doc:`metasync <metasync>`
Fetch metadata from local or remote sources
:doc:`mpdstats <mpdstats>`
Connect to `MPD`_ and update the beets library with play
statistics (last_played, play_count, skip_count, rating).
:doc:`parentwork <parentwork>`
Fetch work titles and works they are part of.
:doc:`replaygain <replaygain>`
Calculate volume normalization for players that support it.
:doc:`scrub <scrub>`
Clean extraneous metadata from music files.
:doc:`zero <zero>`
Nullify fields by pattern or unconditionally.
.. _KeyFinder: http://www.ibrahimshaath.co.uk/keyfinder/
.. _streaming_extractor_music: https://acousticbrainz.org/download
@ -178,37 +225,75 @@ Metadata
Path Formats
------------
* :doc:`albumtypes`: Format album type in path formats.
* :doc:`bucket`: Group your files into bucket directories that cover different
field values ranges.
* :doc:`inline`: Use Python snippets to customize path format strings.
* :doc:`rewrite`: Substitute values in path formats.
* :doc:`the`: Move patterns in path formats (i.e., move "a" and "the" to the
end).
:doc:`albumtypes <albumtypes>`
Format album type in path formats.
:doc:`bucket <bucket>`
Group your files into bucket directories that cover different
field values ranges.
:doc:`inline <inline>`
Use Python snippets to customize path format strings.
:doc:`rewrite <rewrite>`
Substitute values in path formats.
:doc:`the <the>`
Move patterns in path formats (i.e., move "a" and "the" to the
end).
Interoperability
----------------
* :doc:`aura`: A server implementation of the `AURA`_ specification.
* :doc:`badfiles`: Check audio file integrity.
* :doc:`embyupdate`: Automatically notifies `Emby`_ whenever the beets library changes.
* :doc:`fish`: Adds `Fish shell`_ tab autocompletion to ``beet`` commands.
* :doc:`importfeeds`: Keep track of imported files via ``.m3u`` playlist file(s) or symlinks.
* :doc:`ipfs`: Import libraries from friends and get albums from them via ipfs.
* :doc:`kodiupdate`: Automatically notifies `Kodi`_ whenever the beets library
changes.
* :doc:`mpdupdate`: Automatically notifies `MPD`_ whenever the beets library
changes.
* :doc:`play`: Play beets queries in your music player.
* :doc:`playlist`: Use M3U playlists to query the beets library.
* :doc:`plexupdate`: Automatically notifies `Plex`_ whenever the beets library
changes.
* :doc:`smartplaylist`: Generate smart playlists based on beets queries.
* :doc:`sonosupdate`: Automatically notifies `Sonos`_ whenever the beets library
changes.
* :doc:`thumbnails`: Get thumbnails with the cover art on your album folders.
* :doc:`subsonicupdate`: Automatically notifies `Subsonic`_ whenever the beets
library changes.
:doc:`aura <aura>`
A server implementation of the `AURA`_ specification.
:doc:`badfiles <badfiles>`
Check audio file integrity.
:doc:`embyupdate <embyupdate>`
Automatically notifies `Emby`_ whenever the beets library changes.
:doc:`fish <fish>`
Adds `Fish shell`_ tab autocompletion to ``beet`` commands.
:doc:`importfeeds <importfeeds>`
Keep track of imported files via ``.m3u`` playlist file(s) or symlinks.
:doc:`ipfs <ipfs>`
Import libraries from friends and get albums from them via ipfs.
:doc:`kodiupdate <kodiupdate>`
Automatically notifies `Kodi`_ whenever the beets library
changes.
:doc:`mpdupdate <mpdupdate>`
Automatically notifies `MPD`_ whenever the beets library
changes.
:doc:`play <play>`
Play beets queries in your music player.
:doc:`playlist <playlist>`
Use M3U playlists to query the beets library.
:doc:`plexupdate <plexupdate>`
Automatically notifies `Plex`_ whenever the beets library
changes.
:doc:`smartplaylist <smartplaylist>`
Generate smart playlists based on beets queries.
:doc:`sonosupdate <sonosupdate>`
Automatically notifies `Sonos`_ whenever the beets library
changes.
:doc:`thumbnails <thumbnails>`
Get thumbnails with the cover art on your album folders.
:doc:`subsonicupdate <subsonicupdate>`
Automatically notifies `Subsonic`_ whenever the beets
library changes.
.. _AURA: https://auraspec.readthedocs.io
@ -222,28 +307,65 @@ Interoperability
Miscellaneous
-------------
* :doc:`bareasc`: Search albums and tracks with bare ASCII string matching.
* :doc:`bpd`: A music player for your beets library that emulates `MPD`_ and is
compatible with `MPD clients`_.
* :doc:`convert`: Transcode music and embed album art while exporting to
a different directory.
* :doc:`duplicates`: List duplicate tracks or albums.
* :doc:`export`: Export data from queries to a format.
* :doc:`filefilter`: Automatically skip files during the import process based
on regular expressions.
* :doc:`fuzzy`: Search albums and tracks with fuzzy string matching.
* :doc:`hook`: Run a command when an event is emitted by beets.
* :doc:`ihate`: Automatically skip albums and tracks during the import process.
* :doc:`info`: Print music files' tags to the console.
* :doc:`loadext`: Load SQLite extensions.
* :doc:`mbcollection`: Maintain your MusicBrainz collection list.
* :doc:`mbsubmit`: Print an album's tracks in a MusicBrainz-friendly format.
* :doc:`missing`: List missing tracks.
* `mstream`_: A music streaming server + webapp that can be used alongside beets.
* :doc:`random`: Randomly choose albums and tracks from your library.
* :doc:`spotify`: Create Spotify playlists from the Beets library.
* :doc:`types`: Declare types for flexible attributes.
* :doc:`web`: An experimental Web-based GUI for beets.
:doc:`bareasc <bareasc>`
Search albums and tracks with bare ASCII string matching.
:doc:`bpd <bpd>`
A music player for your beets library that emulates `MPD`_ and is
compatible with `MPD clients`_.
:doc:`convert <convert>`
Transcode music and embed album art while exporting to
a different directory.
:doc:`duplicates <duplicates>`
List duplicate tracks or albums.
:doc:`export <export>`
Export data from queries to a format.
:doc:`filefilter <filefilter>`
Automatically skip files during the import process based
on regular expressions.
:doc:`fuzzy <fuzzy>`
Search albums and tracks with fuzzy string matching.
:doc:`hook <hook>`
Run a command when an event is emitted by beets.
:doc:`ihate <ihate>`
Automatically skip albums and tracks during the import process.
:doc:`info <info>`
Print music files' tags to the console.
:doc:`loadext <loadext>`
Load SQLite extensions.
:doc:`mbcollection <mbcollection>`
Maintain your MusicBrainz collection list.
:doc:`mbsubmit <mbsubmit>`
Print an album's tracks in a MusicBrainz-friendly format.
:doc:`missing <missing>`
List missing tracks.
`mstream`_
A music streaming server + webapp that can be used alongside beets.
:doc:`random <random>`
Randomly choose albums and tracks from your library.
:doc:`spotify <spotify>`
Create Spotify playlists from the Beets library.
:doc:`types <types>`
Declare types for flexible attributes.
:doc:`web <web>`
An experimental Web-based GUI for beets.
.. _MPD: https://www.musicpd.org/
.. _MPD clients: https://mpd.wikia.com/wiki/Clients
@ -270,76 +392,109 @@ line in your config file.
Here are a few of the plugins written by the beets community:
* `beets-alternatives`_ manages external files.
`beets-alternatives`_
Manages external files.
* `beet-amazon`_ adds Amazon.com as a tagger data source.
`beet-amazon`_
Adds Amazon.com as a tagger data source.
* `beets-artistcountry`_ fetches the artist's country of origin from
MusicBrainz.
`beets-artistcountry`_
Fetches the artist's country of origin from MusicBrainz.
* `beets-autofix`_ automates repetitive tasks to keep your library in order.
`beets-autofix`_
Automates repetitive tasks to keep your library in order.
* `beets-audible`_ adds Audible as a tagger data source and provides
other features for managing audiobook collections.
`beets-audible`_
Adds Audible as a tagger data source and provides
other features for managing audiobook collections.
* `beets-barcode`_ lets you scan or enter barcodes for physical media to
search for their metadata.
`beets-barcode`_
Lets you scan or enter barcodes for physical media to
search for their metadata.
* `beetcamp`_ enables **bandcamp.com** autotagger with a fairly extensive amount of metadata.
`beetcamp`_
Enables **bandcamp.com** autotagger with a fairly extensive amount of metadata.
* `beetstream`_ is server implementation of the `SubSonic API`_ specification, allowing you to stream your music on a multitude of clients.
`beetstream`_
A server implementation of the `SubSonic API`_ specification, allowing you to stream your music on a multitude of clients.
* `beets-bpmanalyser`_ analyses songs and calculates their tempo (BPM).
`beets-bpmanalyser`_
Analyses songs and calculates their tempo (BPM).
* `beets-check`_ automatically checksums your files to detect corruption.
`beets-check`_
Automatically checksums your files to detect corruption.
* `A cmus plugin`_ integrates with the `cmus`_ console music player.
`A cmus plugin`_
Integrates with the `cmus`_ console music player.
* `beets-copyartifacts`_ helps bring non-music files along during import.
`beets-copyartifacts`_
Helps bring non-music files along during import.
* `beets-describe`_ gives you the full picture of a single attribute of your library items.
`beets-describe`_
Gives you the full picture of a single attribute of your library items.
* `drop2beets`_ automatically imports singles as soon as they are dropped in a
folder (using Linux's ``inotify``). You can also set a sub-folders
hierarchy to set flexible attributes by the way.
`drop2beets`_
Automatically imports singles as soon as they are dropped in a
folder (using Linux's ``inotify``). You can also set a sub-folders
hierarchy to set flexible attributes along the way.
* `dsedivec`_ has two plugins: ``edit`` and ``moveall``.
`dsedivec`_
Has two plugins: ``edit`` and ``moveall``.
* `beets-follow`_ lets you check for new albums from artists you like.
`beets-follow`_
Lets you check for new albums from artists you like.
* `beetFs`_ is a FUSE filesystem for browsing the music in your beets library.
(Might be out of date.)
`beetFs`_
A FUSE filesystem for browsing the music in your beets library.
(Might be out of date.)
* `beets-goingrunning`_ generates playlists to go with your running sessions.
`beets-goingrunning`_
Generates playlists to go with your running sessions.
* `beets-ibroadcast`_ uploads tracks to the `iBroadcast`_ cloud service.
`beets-ibroadcast`_
Uploads tracks to the `iBroadcast`_ cloud service.
* `beets-importreplace`_ lets you perform regex replacements on incoming
metadata.
`beets-importreplace`_
Lets you perform regex replacements on incoming
metadata.
* `beets-mosaic`_ generates a montage of a mosaic from cover art.
`beets-mosaic`_
Generates a montage of a mosaic from cover art.
* `beets-noimport`_ adds and removes directories from the incremental import skip list.
`beets-noimport`_
Adds and removes directories from the incremental import skip list.
* `beets-originquery`_ augments MusicBrainz queries with locally-sourced data
to improve autotagger results.
`beets-originquery`_
Augments MusicBrainz queries with locally-sourced data
to improve autotagger results.
* `beets-popularity`_ fetches popularity values from Deezer.
`beets-plexsync`_
Allows you to sync your Plex library with your beets library, create smart playlists in Plex, and import online playlists (from services like Spotify) into Plex.
* `beets-setlister`_ generate playlists from the setlists of a given artist.
`beets-popularity`_
Fetches popularity values from Deezer.
* `beet-summarize`_ can compute lots of counts and statistics about your music
library.
`beets-setlister`_
Generate playlists from the setlists of a given artist.
* `beets-usertag`_ lets you use keywords to tag and organize your music.
`beet-summarize`_
Can compute lots of counts and statistics about your music
library.
* `whatlastgenre`_ fetches genres from various music sites.
`beets-usertag`_
Lets you use keywords to tag and organize your music.
* `beets-xtractor`_ extracts low- and high-level musical information from your songs.
`whatlastgenre`_
Fetches genres from various music sites.
* `beets-ydl`_ downloads audio from youtube-dl sources and import into beets.
`beets-xtractor`_
Extracts low- and high-level musical information from your songs.
* `beets-yearfixer`_ attempts to fix all missing ``original_year`` and ``year`` fields.
`beets-ydl`_
Downloads audio from youtube-dl sources and imports it into beets.
`beets-yearfixer`_
Attempts to fix all missing ``original_year`` and ``year`` fields.
.. _beets-barcode: https://github.com/8h2a/beets-barcode
.. _beetcamp: https://github.com/snejus/beetcamp
@ -366,6 +521,7 @@ Here are a few of the plugins written by the beets community:
.. _whatlastgenre: https://github.com/YetAnotherNerd/whatlastgenre/tree/master/plugin/beets
.. _beets-usertag: https://github.com/igordertigor/beets-usertag
.. _beets-popularity: https://github.com/abba23/beets-popularity
.. _beets-plexsync: https://github.com/arsaboo/beets-plexsync
.. _beets-ydl: https://github.com/vmassuchetto/beets-ydl
.. _beet-summarize: https://github.com/steven-murray/beet-summarize
.. _beets-mosaic: https://github.com/SusannaMaria/beets-mosaic

View file

@ -8,18 +8,22 @@ To use ``plexupdate`` plugin, enable it in your configuration
(see :ref:`using-plugins`).
Then, you'll probably want to configure the specifics of your Plex server.
You can do that using an ``plex:`` section in your ``config.yaml``,
which looks like this::
which looks like this:
plex:
host: localhost
port: 32400
token: token
.. code-block:: yaml
plex:
host: "localhost"
port: 32400
token: "TOKEN"
The ``token`` key is optional: you'll need to use it when in a Plex Home (see Plex's own `documentation about tokens`_).
To use the ``plexupdate`` plugin you need to install the `requests`_ library with:
pip install requests
.. code-block:: console
$ pip install beets[plexupdate]
With that all in place, you'll see beets send the "update" command to your Plex
server every time you change your beets library.
@ -44,4 +48,4 @@ The available options under the ``plex:`` section are:
- **secure**: Use secure connections to the Plex server.
Default: ``False``
- **ignore_cert_errors**: Ignore TLS certificate errors when using secure connections.
Default: ``False``

View file

@ -82,6 +82,17 @@ automatically notify MPD of the playlist change, by adding ``mpdupdate`` to
the ``plugins`` line in your config file *after* the ``smartplaylist``
plugin.
While changing existing playlists in the beets configuration, it can help to use
the ``--pretend`` option to check whether the edits work as expected. The
results of the queries are printed out instead of being written to the playlist
file:

.. code-block:: console

    $ beet splupdate --pretend BeatlesUniverse.m3u

The ``pretend_paths`` configuration option controls whether the items are
displayed according to the user's ``format_item`` setting or as the file paths
that would be written to the m3u file.
Configuration
-------------
@ -105,3 +116,5 @@ other configuration options are:
example, you could use the URL for a server where the music is stored.
Default: empty string.
- **urlencode**: URL-encode all paths. Default: ``no``.
- **pretend_paths**: When running with ``--pretend``, show the actual file
paths that will be written to the m3u file. Default: ``false``.
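For example, to always show raw file paths when running with ``--pretend``,
one might set:

.. code-block:: yaml

    smartplaylist:
        pretend_paths: yes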

View file

@ -135,7 +135,7 @@ unexpected behavior on all popular platforms::
These substitutions remove forward and back slashes, leading dots, and
control characters—all of which is a good idea on any OS. The fourth line
removes the Windows "reserved characters" (useful even on Unix for for
removes the Windows "reserved characters" (useful even on Unix for
compatibility with Windows-influenced network filesystems like Samba).
Trailing dots and trailing whitespace, which can cause problems on Windows
clients, are also removed.

View file

@ -179,10 +179,10 @@ setup(
'Environment :: Web Environment',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: Implementation :: CPython',
],
)

View file

@ -453,7 +453,7 @@ class TestHelper:
def run_with_output(self, *args):
with capture_stdout() as out:
self.run_command(*args)
return util.text_string(out.getvalue())
return out.getvalue()
# Safe file operations

Binary file not shown.

View file

@ -106,6 +106,6 @@ class AlbumTypesPluginTest(unittest.TestCase, TestHelper):
def _create_album(self, album_types: [str], artist_id: str = 0):
return self.add_album(
albumtypes='; '.join(album_types),
albumtypes=album_types,
mb_albumartistid=artist_id
)

View file

@ -77,6 +77,19 @@ class StoreTest(_common.LibTestCase):
self.i.store()
self.assertTrue('composer' not in self.i._dirty)
def test_store_album_cascades_flex_deletes(self):
album = _common.album()
album.flex1 = "Flex-1"
self.lib.add(album)
item = _common.item()
item.album_id = album.id
item.flex1 = "Flex-1"
self.lib.add(item)
del album.flex1
album.store()
self.assertNotIn('flex1', album)
self.assertNotIn('flex1', album.items()[0])
class AddTest(_common.TestCase):
def setUp(self):

View file

@ -15,8 +15,8 @@
"""Various tests for querying the library database.
"""
from contextlib import contextmanager
from functools import partial
from unittest.mock import patch
import os
import sys
import unittest
@ -454,23 +454,14 @@ class PathQueryTest(_common.LibTestCase, TestHelper, AssertsMixin):
self.lib.add(i2)
self.lib.add_album([i2])
@contextmanager
def force_implicit_query_detection(self):
# Unadorned path queries with path separators in them are considered
# path queries only when the path in question actually exists. So we
# mock the existence check to return true.
self.patcher_exists = patch('beets.library.os.path.exists')
self.patcher_exists.start().return_value = True
# We have to create function samefile as it does not exist on
# Windows and python 2.7
self.patcher_samefile = patch('beets.library.os.path.samefile',
create=True)
self.patcher_samefile.start().return_value = True
def tearDown(self):
super().tearDown()
self.patcher_samefile.stop()
self.patcher_exists.stop()
beets.library.PathQuery.force_implicit_query_detection = True
yield
beets.library.PathQuery.force_implicit_query_detection = False
def test_path_exact_match(self):
q = 'path:/a/b/c.mp3'
@ -526,31 +517,35 @@ class PathQueryTest(_common.LibTestCase, TestHelper, AssertsMixin):
@unittest.skipIf(sys.platform == 'win32', WIN32_NO_IMPLICIT_PATHS)
def test_slashed_query_matches_path(self):
q = '/a/b'
results = self.lib.items(q)
self.assert_items_matched(results, ['path item'])
with self.force_implicit_query_detection():
q = '/a/b'
results = self.lib.items(q)
self.assert_items_matched(results, ['path item'])
results = self.lib.albums(q)
self.assert_albums_matched(results, ['path album'])
results = self.lib.albums(q)
self.assert_albums_matched(results, ['path album'])
@unittest.skipIf(sys.platform == 'win32', WIN32_NO_IMPLICIT_PATHS)
def test_path_query_in_or_query(self):
q = '/a/b , /a/b'
results = self.lib.items(q)
self.assert_items_matched(results, ['path item'])
with self.force_implicit_query_detection():
q = '/a/b , /a/b'
results = self.lib.items(q)
self.assert_items_matched(results, ['path item'])
def test_non_slashed_does_not_match_path(self):
q = 'c.mp3'
results = self.lib.items(q)
self.assert_items_matched(results, [])
with self.force_implicit_query_detection():
q = 'c.mp3'
results = self.lib.items(q)
self.assert_items_matched(results, [])
results = self.lib.albums(q)
self.assert_albums_matched(results, [])
results = self.lib.albums(q)
self.assert_albums_matched(results, [])
def test_slashes_in_explicit_field_does_not_match_path(self):
q = 'title:/a/b'
results = self.lib.items(q)
self.assert_items_matched(results, [])
with self.force_implicit_query_detection():
q = 'title:/a/b'
results = self.lib.items(q)
self.assert_items_matched(results, [])
def test_path_item_regex(self):
q = 'path::c\\.mp3$'
@ -603,101 +598,67 @@ class PathQueryTest(_common.LibTestCase, TestHelper, AssertsMixin):
results = self.lib.items(makeq(case_sensitive=False))
self.assert_items_matched(results, ['path item', 'caps path'])
# Check for correct case sensitivity selection (this check
# only works on non-Windows OSes).
with _common.system_mock('Darwin'):
# exists = True and samefile = True => Case insensitive
q = makeq()
self.assertEqual(q.case_sensitive, False)
# FIXME: Also create a variant of this test for windows, which tests
# both os.sep and os.altsep
@unittest.skipIf(sys.platform == 'win32', 'win32')
def test_path_sep_detection(self):
is_path_query = beets.library.PathQuery.is_path_query
# exists = True and samefile = False => Case sensitive
self.patcher_samefile.stop()
self.patcher_samefile.start().return_value = False
try:
q = makeq()
self.assertEqual(q.case_sensitive, True)
finally:
self.patcher_samefile.stop()
self.patcher_samefile.start().return_value = True
# Test platform-aware default sensitivity when the library path
# does not exist. For the duration of this check, we change the
# `os.path.exists` mock to return False.
self.patcher_exists.stop()
self.patcher_exists.start().return_value = False
try:
with _common.system_mock('Darwin'):
q = makeq()
self.assertEqual(q.case_sensitive, True)
with _common.system_mock('Windows'):
q = makeq()
self.assertEqual(q.case_sensitive, False)
finally:
# Restore the `os.path.exists` mock to its original state.
self.patcher_exists.stop()
self.patcher_exists.start().return_value = True
@patch('beets.library.os')
def test_path_sep_detection(self, mock_os):
mock_os.sep = '/'
mock_os.altsep = None
mock_os.path.exists = lambda p: True
is_path = beets.library.PathQuery.is_path_query
self.assertTrue(is_path('/foo/bar'))
self.assertTrue(is_path('foo/bar'))
self.assertTrue(is_path('foo/'))
self.assertFalse(is_path('foo'))
self.assertTrue(is_path('foo/:bar'))
self.assertFalse(is_path('foo:bar/'))
self.assertFalse(is_path('foo:/bar'))
with self.force_implicit_query_detection():
self.assertTrue(is_path_query('/foo/bar'))
self.assertTrue(is_path_query('foo/bar'))
self.assertTrue(is_path_query('foo/'))
self.assertFalse(is_path_query('foo'))
self.assertTrue(is_path_query('foo/:bar'))
self.assertFalse(is_path_query('foo:bar/'))
self.assertFalse(is_path_query('foo:/bar'))
# FIXME: shouldn't this also work on windows?
@unittest.skipIf(sys.platform == 'win32', WIN32_NO_IMPLICIT_PATHS)
def test_detect_absolute_path(self):
# Don't patch `os.path.exists`; we'll actually create a file when
# it exists.
self.patcher_exists.stop()
is_path = beets.library.PathQuery.is_path_query
"""Test detection of implicit path queries based on whether or
not the path actually exists, when using an absolute path query.
try:
path = self.touch(os.path.join(b'foo', b'bar'))
path = path.decode('utf-8')
Thus, don't use the `force_implicit_query_detection()`
contextmanager which would disable the existence check.
"""
is_path_query = beets.library.PathQuery.is_path_query
# The file itself.
self.assertTrue(is_path(path))
path = self.touch(os.path.join(b'foo', b'bar'))
self.assertTrue(os.path.isabs(util.syspath(path)))
path_str = path.decode('utf-8')
# The parent directory.
parent = os.path.dirname(path)
self.assertTrue(is_path(parent))
# The file itself.
self.assertTrue(is_path_query(path_str))
# Some non-existent path.
self.assertFalse(is_path(path + 'baz'))
# The parent directory.
parent = os.path.dirname(path_str)
self.assertTrue(is_path_query(parent))
finally:
# Restart the `os.path.exists` patch.
self.patcher_exists.start()
# Some non-existent path.
self.assertFalse(is_path_query(path_str + 'baz'))
def test_detect_relative_path(self):
self.patcher_exists.stop()
is_path = beets.library.PathQuery.is_path_query
"""Test detection of implicit path queries based on whether or
not the path actually exists, when using a relative path query.
Thus, don't use the `force_implicit_query_detection()`
contextmanager which would disable the existence check.
"""
is_path_query = beets.library.PathQuery.is_path_query
self.touch(os.path.join(b'foo', b'bar'))
# Temporarily change directory so relative paths work.
cur_dir = os.getcwd()
try:
self.touch(os.path.join(b'foo', b'bar'))
# Temporarily change directory so relative paths work.
cur_dir = os.getcwd()
try:
os.chdir(self.temp_dir)
self.assertTrue(is_path('foo/'))
self.assertTrue(is_path('foo/bar'))
self.assertTrue(is_path('foo/bar:tagada'))
self.assertFalse(is_path('bar'))
finally:
os.chdir(cur_dir)
os.chdir(self.temp_dir)
self.assertTrue(is_path_query('foo/'))
self.assertTrue(is_path_query('foo/bar'))
self.assertTrue(is_path_query('foo/bar:tagada'))
self.assertFalse(is_path_query('bar'))
finally:
self.patcher_exists.start()
os.chdir(cur_dir)
class IntQueryTest(unittest.TestCase, TestHelper):

View file

@ -701,27 +701,30 @@ class UpdateTest(_common.TestCase):
item = self.lib.items().get()
self.assertEqual(item.title, 'full')
@unittest.expectedFailure
def test_multivalued_albumtype_roundtrip(self):
# https://github.com/beetbox/beets/issues/4528
# albumtypes is empty for our test fixtures, so populate it first
album = self.album
# setting albumtypes does not set albumtype currently...
# FIXME: When actually fixing the issue 4528, consider whether this
# should be set to "album" or ["album"]
album.albumtype = "album"
album.albumtypes = "album"
correct_albumtypes = ["album", "live"]
# Setting albumtypes does not set albumtype, currently.
# Using x[0] mirrors https://github.com/beetbox/mediafile/blob/057432ad53b3b84385e5582f69f44dc00d0a725d/mediafile.py#L1928 # noqa: E501
correct_albumtype = correct_albumtypes[0]
album.albumtype = correct_albumtype
album.albumtypes = correct_albumtypes
album.try_sync(write=True, move=False)
album.load()
albumtype_before = album.albumtype
self.assertEqual(albumtype_before, "album")
self.assertEqual(album.albumtype, correct_albumtype)
self.assertEqual(album.albumtypes, correct_albumtypes)
self._update()
album.load()
self.assertEqual(albumtype_before, album.albumtype)
self.assertEqual(album.albumtype, correct_albumtype)
self.assertEqual(album.albumtypes, correct_albumtypes)
class PrintTest(_common.TestCase):
@ -1185,8 +1188,7 @@ class ShowChangeTest(_common.TestCase):
cur_album,
autotag.AlbumMatch(album_dist, info, mapping, set(), set()),
)
# FIXME decoding shouldn't be done here
return util.text_string(self.io.getoutput().lower())
return self.io.getoutput().lower()
def test_null_change(self):
msg = self._show_change()

View file

@ -14,10 +14,11 @@
"""Tests for base utils from the beets.util package.
"""
import sys
import re
import os
import platform
import re
import subprocess
import sys
import unittest
from unittest.mock import patch, Mock
@ -122,6 +123,28 @@ class UtilTest(unittest.TestCase):
self.assertEqual(exc_context.exception.returncode, 1)
self.assertEqual(exc_context.exception.cmd, 'taga \xc3\xa9')
def test_case_sensitive_default(self):
path = util.bytestring_path(util.normpath(
"/this/path/does/not/exist",
))
self.assertEqual(
util.case_sensitive(path),
platform.system() != 'Windows',
)
@unittest.skipIf(sys.platform == 'win32', 'fs is not case sensitive')
def test_case_sensitive_detects_sensitive(self):
# FIXME: Add tests for more code paths of case_sensitive()
# when the filesystem on the test runner is not case sensitive
pass
@unittest.skipIf(sys.platform != 'win32', 'fs is case sensitive')
def test_case_sensitive_detects_insensitive(self):
# FIXME: Add tests for more code paths of case_sensitive()
# when the filesystem on the test runner is case sensitive
pass
class PathConversionTest(_common.TestCase):
def test_syspath_windows_format(self):