Merge branch 'master' into embedart-clear-improvements

This commit is contained in:
Serene 2026-01-20 08:43:30 +10:00 committed by GitHub
commit 39f65f6b11
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
111 changed files with 2632 additions and 1236 deletions

View file

@ -81,7 +81,17 @@ d93ddf8dd43e4f9ed072a03829e287c78d2570a2
59c93e70139f70e9fd1c6f3c1bceb005945bec33
# Moved ui.commands._utils into ui.commands.utils
25ae330044abf04045e3f378f72bbaed739fb30d
# Refactor test_ui_command.py into multiple modules
# Refactor test_ui_command.py into multiple modules
a59e41a88365e414db3282658d2aa456e0b3468a
# pyupgrade Python 3.10
301637a1609831947cb5dd90270ed46c24b1ab1b
# Fix changelog formatting
658b184c59388635787b447983ecd3a575f4fe56
# Configure future-annotations
ac7f3d9da95c2d0a32e5c908ea68480518a1582d
# Configure ruff for py310
c46069654628040316dea9db85d01b263db3ba9e
# Enable RUF rules
4749599913a42e02e66b37db9190de11d6be2cdf
# Address RUF012
bc71ec308eb938df1d349f6857634ddf2a82e339

View file

@ -66,7 +66,7 @@ jobs:
- if: ${{ env.IS_MAIN_PYTHON != 'true' }}
name: Test without coverage
run: |
poetry install --without=lint --extras=autobpm --extras=lyrics --extras=replaygain --extras=reflink --extras=fetchart --extras=chroma --extras=sonosupdate --extras=parentwork
poetry install --without=lint --extras=autobpm --extras=lyrics --extras=replaygain --extras=reflink --extras=fetchart --extras=chroma --extras=sonosupdate
poe test
- if: ${{ env.IS_MAIN_PYTHON == 'true' }}
@ -74,7 +74,7 @@ jobs:
env:
LYRICS_UPDATED: ${{ steps.lyrics-update.outputs.any_changed }}
run: |
poetry install --extras=autobpm --extras=lyrics --extras=docs --extras=replaygain --extras=reflink --extras=fetchart --extras=chroma --extras=sonosupdate --extras=parentwork
poetry install --extras=autobpm --extras=lyrics --extras=docs --extras=replaygain --extras=reflink --extras=fetchart --extras=chroma --extras=sonosupdate
poe docs
poe test-with-coverage

View file

@ -25,7 +25,7 @@ import lap
import numpy as np
from beets import config, logging, metadata_plugins, plugins
from beets.autotag import AlbumInfo, AlbumMatch, TrackInfo, TrackMatch, hooks
from beets.autotag import AlbumMatch, TrackMatch, hooks
from beets.util import get_most_common_tags
from .distance import VA_ARTISTS, distance, track_distance
@ -33,6 +33,7 @@ from .distance import VA_ARTISTS, distance, track_distance
if TYPE_CHECKING:
from collections.abc import Iterable, Sequence
from beets.autotag import AlbumInfo, TrackInfo
from beets.library import Item
# Global logger.

View file

@ -26,17 +26,10 @@ import threading
import time
from abc import ABC
from collections import defaultdict
from collections.abc import (
Callable,
Generator,
Iterable,
Iterator,
Mapping,
Sequence,
)
from collections.abc import Mapping
from functools import cached_property
from sqlite3 import Connection, sqlite_version_info
from typing import TYPE_CHECKING, Any, AnyStr, Generic
from sqlite3 import sqlite_version_info
from typing import TYPE_CHECKING, Any, AnyStr, ClassVar, Generic
from typing_extensions import (
Self,
@ -48,20 +41,20 @@ import beets
from ..util import cached_classproperty, functemplate
from . import types
from .query import (
FieldQueryType,
FieldSort,
MatchQuery,
NullSort,
Query,
Sort,
TrueQuery,
)
from .query import MatchQuery, NullSort, TrueQuery
if TYPE_CHECKING:
from collections.abc import (
Callable,
Generator,
Iterable,
Iterator,
Sequence,
)
from sqlite3 import Connection
from types import TracebackType
from .query import SQLiteType
from .query import FieldQueryType, FieldSort, Query, Sort, SQLiteType
D = TypeVar("D", bound="Database", default=Any)
@ -306,7 +299,7 @@ class Model(ABC, Generic[D]):
"""The flex field SQLite table name.
"""
_fields: dict[str, types.Type] = {}
_fields: ClassVar[dict[str, types.Type]] = {}
"""A mapping indicating available "fixed" fields on this type. The
keys are field names and the values are `Type` objects.
"""
@ -321,7 +314,7 @@ class Model(ABC, Generic[D]):
"""Optional types for non-fixed (flexible and computed) fields."""
return {}
_sorts: dict[str, type[FieldSort]] = {}
_sorts: ClassVar[dict[str, type[FieldSort]]] = {}
"""Optional named sort criteria. The keys are strings and the values
are subclasses of `Sort`.
"""
@ -1124,6 +1117,16 @@ class Database:
# call conn.close() in _close()
check_same_thread=False,
)
if sys.version_info >= (3, 12) and sqlite3.sqlite_version_info >= (
3,
29,
0,
):
# If possible, disable double-quoted strings
conn.setconfig(sqlite3.SQLITE_DBCONFIG_DQS_DDL, 0)
conn.setconfig(sqlite3.SQLITE_DBCONFIG_DQS_DML, 0)
self.add_functions(conn)
if self.supports_extensions:

View file

@ -20,17 +20,19 @@ import os
import re
import unicodedata
from abc import ABC, abstractmethod
from collections.abc import Iterator, MutableSequence, Sequence
from collections.abc import Sequence
from datetime import datetime, timedelta
from functools import cached_property, reduce
from operator import mul, or_
from re import Pattern
from typing import TYPE_CHECKING, Any, Generic, TypeVar, Union
from typing import TYPE_CHECKING, Any, ClassVar, Generic, TypeVar
from beets import util
from beets.util.units import raw_seconds_short
if TYPE_CHECKING:
from collections.abc import Iterator, MutableSequence
from beets.dbcore.db import AnyModel, Model
P = TypeVar("P", default=Any)
@ -122,7 +124,7 @@ class Query(ABC):
return hash(type(self))
SQLiteType = Union[str, bytes, float, int, memoryview, None]
SQLiteType = str | bytes | float | int | memoryview | None
AnySQLiteType = TypeVar("AnySQLiteType", bound=SQLiteType)
FieldQueryType = type["FieldQuery"]
@ -689,7 +691,12 @@ class Period:
("%Y-%m-%dT%H:%M", "%Y-%m-%d %H:%M"), # minute
("%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"), # second
)
relative_units = {"y": 365, "m": 30, "w": 7, "d": 1}
relative_units: ClassVar[dict[str, int]] = {
"y": 365,
"m": 30,
"w": 7,
"d": 1,
}
relative_re = "(?P<sign>[+|-]?)(?P<quantity>[0-9]+)(?P<timespan>[y|m|w|d])"
def __init__(self, date: datetime, precision: str):

View file

@ -250,7 +250,7 @@ def parse_sorted_query(
# Split up query in to comma-separated subqueries, each representing
# an AndQuery, which need to be joined together in one OrQuery
subquery_parts = []
for part in parts + [","]:
for part in [*parts, ","]:
if part.endswith(","):
# Ensure we can catch "foo, bar" as well as "foo , bar"
last_subquery_part = part[:-1]

View file

@ -20,7 +20,7 @@ import re
import time
import typing
from abc import ABC
from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast
from typing import TYPE_CHECKING, Any, ClassVar, Generic, TypeVar, cast
import beets
from beets import util
@ -406,7 +406,7 @@ class MusicalKey(String):
The standard format is C, Cm, C#, C#m, etc.
"""
ENHARMONIC = {
ENHARMONIC: ClassVar[dict[str, str]] = {
r"db": "c#",
r"eb": "d#",
r"gb": "f#",

View file

@ -28,11 +28,11 @@ from .tasks import (
# Note: Stages are not exposed to the public API
__all__ = [
"ImportSession",
"ImportAbortError",
"Action",
"ImportTask",
"ArchiveImportTask",
"ImportAbortError",
"ImportSession",
"ImportTask",
"SentinelImportTask",
"SingletonImportTask",
]

View file

@ -17,7 +17,7 @@ import os
import time
from typing import TYPE_CHECKING
from beets import config, dbcore, library, logging, plugins, util
from beets import config, logging, plugins, util
from beets.importer.tasks import Action
from beets.util import displayable_path, normpath, pipeline, syspath
@ -27,6 +27,7 @@ from .state import ImportState
if TYPE_CHECKING:
from collections.abc import Sequence
from beets import dbcore, library
from beets.util import PathBytes
from .tasks import ImportTask

View file

@ -388,5 +388,5 @@ def _extend_pipeline(tasks, *stages):
else:
task_iter = tasks
ipl = pipeline.Pipeline([task_iter] + list(stages))
ipl = pipeline.Pipeline([task_iter, *list(stages)])
return pipeline.multiple(ipl.pull())

View file

@ -20,7 +20,7 @@ import re
import shutil
import time
from collections import defaultdict
from collections.abc import Callable, Iterable, Sequence
from collections.abc import Callable
from enum import Enum
from tempfile import mkdtemp
from typing import TYPE_CHECKING, Any
@ -33,6 +33,8 @@ from beets.dbcore.query import PathQuery
from .state import ImportState
if TYPE_CHECKING:
from collections.abc import Iterable, Sequence
from beets.autotag.match import Recommendation
from .session import ImportSession
@ -232,7 +234,7 @@ class ImportTask(BaseImportTask):
or APPLY (in which case the data comes from the choice).
"""
if self.choice_flag in (Action.ASIS, Action.RETAG):
likelies, consensus = util.get_most_common_tags(self.items)
likelies, _ = util.get_most_common_tags(self.items)
return likelies
elif self.choice_flag is Action.APPLY and self.match:
return self.match.info.copy()
@ -678,6 +680,8 @@ class SingletonImportTask(ImportTask):
return [self.item]
def apply_metadata(self):
if config["import"]["from_scratch"]:
self.item.clear()
autotag.apply_item_metadata(self.item, self.match.info)
def _emit_imported(self, lib):
@ -890,7 +894,7 @@ class ArchiveImportTask(SentinelImportTask):
# The (0, 0, -1) is added to date_time because the
# function time.mktime expects a 9-element tuple.
# The -1 indicates that the DST flag is unknown.
date_time = time.mktime(f.date_time + (0, 0, -1))
date_time = time.mktime((*f.date_time, 0, 0, -1))
fullpath = os.path.join(extract_to, f.filename)
os.utime(fullpath, (date_time, date_time))

View file

@ -17,13 +17,13 @@ def __getattr__(name: str):
__all__ = [
"Library",
"LibModel",
"Album",
"Item",
"parse_query_parts",
"parse_query_string",
"FileOperationError",
"Item",
"LibModel",
"Library",
"ReadError",
"WriteError",
"parse_query_parts",
"parse_query_string",
]

View file

@ -7,7 +7,7 @@ import time
import unicodedata
from functools import cached_property
from pathlib import Path
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, ClassVar
from mediafile import MediaFile, UnreadableFileError
@ -229,7 +229,7 @@ class Album(LibModel):
_table = "albums"
_flex_table = "album_attributes"
_always_dirty = True
_fields = {
_fields: ClassVar[dict[str, types.Type]] = {
"id": types.PRIMARY_ID,
"artpath": types.NullPathType(),
"added": types.DATE,
@ -281,13 +281,13 @@ class Album(LibModel):
def _types(cls) -> dict[str, types.Type]:
return {**super()._types, "path": types.PathType()}
_sorts = {
_sorts: ClassVar[dict[str, type[dbcore.query.FieldSort]]] = {
"albumartist": dbcore.query.SmartArtistSort,
"artist": dbcore.query.SmartArtistSort,
}
# List of keys that are set on an album's items.
item_keys = [
item_keys: ClassVar[list[str]] = [
"added",
"albumartist",
"albumartists",
@ -624,7 +624,7 @@ class Item(LibModel):
_table = "items"
_flex_table = "item_attributes"
_fields = {
_fields: ClassVar[dict[str, types.Type]] = {
"id": types.PRIMARY_ID,
"path": types.PathType(),
"album_id": types.FOREIGN_ID,
@ -744,7 +744,9 @@ class Item(LibModel):
_formatter = FormattedItemMapping
_sorts = {"artist": dbcore.query.SmartArtistSort}
_sorts: ClassVar[dict[str, type[dbcore.query.FieldSort]]] = {
"artist": dbcore.query.SmartArtistSort
}
@cached_classproperty
def _queries(cls) -> dict[str, FieldQueryType]:

View file

@ -35,10 +35,25 @@ from logging import (
Handler,
Logger,
NullHandler,
RootLogger,
StreamHandler,
)
from typing import TYPE_CHECKING, Any, TypeVar, Union, overload
from typing import TYPE_CHECKING, Any, TypeVar, overload
if TYPE_CHECKING:
from collections.abc import Mapping
from logging import RootLogger
from types import TracebackType
T = TypeVar("T")
# see https://github.com/python/typeshed/blob/main/stdlib/logging/__init__.pyi
_SysExcInfoType = (
tuple[type[BaseException], BaseException, TracebackType | None]
| tuple[None, None, None]
)
_ExcInfoType = _SysExcInfoType | BaseException | bool | None
_ArgsType = tuple[object, ...] | Mapping[str, object]
__all__ = [
"DEBUG",
@ -54,21 +69,6 @@ __all__ = [
"getLogger",
]
if TYPE_CHECKING:
from collections.abc import Mapping
T = TypeVar("T")
from types import TracebackType
# see https://github.com/python/typeshed/blob/main/stdlib/logging/__init__.pyi
_SysExcInfoType = Union[
tuple[type[BaseException], BaseException, Union[TracebackType, None]],
tuple[None, None, None],
]
_ExcInfoType = Union[None, bool, _SysExcInfoType, BaseException]
_ArgsType = Union[tuple[object, ...], Mapping[str, object]]
# Regular expression to match:
# - C0 control characters (0x00-0x1F) except useful whitespace (\t, \n, \r)
# - DEL control character (0x7f)

View file

@ -141,7 +141,13 @@ class PluginLogFilter(logging.Filter):
# Managing the plugins themselves.
class BeetsPlugin(metaclass=abc.ABCMeta):
class BeetsPluginMeta(abc.ABCMeta):
template_funcs: ClassVar[TFuncMap[str]] = {}
template_fields: ClassVar[TFuncMap[Item]] = {}
album_template_fields: ClassVar[TFuncMap[Album]] = {}
class BeetsPlugin(metaclass=BeetsPluginMeta):
"""The base class for all beets plugins. Plugins provide
functionality by defining a subclass of BeetsPlugin and overriding
the abstract methods defined here.
@ -151,9 +157,10 @@ class BeetsPlugin(metaclass=abc.ABCMeta):
list
)
listeners: ClassVar[dict[EventType, list[Listener]]] = defaultdict(list)
template_funcs: ClassVar[TFuncMap[str]] | TFuncMap[str] = {} # type: ignore[valid-type]
template_fields: ClassVar[TFuncMap[Item]] | TFuncMap[Item] = {} # type: ignore[valid-type]
album_template_fields: ClassVar[TFuncMap[Album]] | TFuncMap[Album] = {} # type: ignore[valid-type]
template_funcs: TFuncMap[str]
template_fields: TFuncMap[Item]
album_template_fields: TFuncMap[Album]
name: str
config: ConfigView
@ -161,7 +168,7 @@ class BeetsPlugin(metaclass=abc.ABCMeta):
import_stages: list[ImportStageFunc]
def __init_subclass__(cls) -> None:
"""Enable legacy metadatasource plugins to work with the new interface.
"""Enable legacy metadata source plugins to work with the new interface.
When a plugin subclass of BeetsPlugin defines a `data_source` attribute
but does not inherit from MetadataSourcePlugin, this hook:
@ -220,14 +227,10 @@ class BeetsPlugin(metaclass=abc.ABCMeta):
self.name = name or self.__module__.split(".")[-1]
self.config = beets.config[self.name]
# If the class attributes are not set, initialize as instance attributes.
# TODO: Revise with v3.0.0, see also type: ignore[valid-type] above
if not self.template_funcs:
self.template_funcs = {}
if not self.template_fields:
self.template_fields = {}
if not self.album_template_fields:
self.album_template_fields = {}
# create per-instance storage for template fields and functions
self.template_funcs = {}
self.template_fields = {}
self.album_template_fields = {}
self.early_import_stages = []
self.import_stages = []

View file

@ -120,7 +120,7 @@ def capture_stdout():
def has_program(cmd, args=["--version"]):
"""Returns `True` if `cmd` can be executed."""
full_cmd = [cmd] + args
full_cmd = [cmd, *args]
try:
with open(os.devnull, "wb") as devnull:
subprocess.check_call(
@ -526,7 +526,7 @@ class ImportHelper(TestHelper):
autotagging library and several assertions for the library.
"""
default_import_config = {
default_import_config: ClassVar[dict[str, bool]] = {
"autotag": True,
"copy": True,
"hardlink": False,
@ -882,7 +882,7 @@ class FetchImageHelper:
def run(self, *args, **kwargs):
super().run(*args, **kwargs)
IMAGEHEADER: dict[str, bytes] = {
IMAGEHEADER: ClassVar[dict[str, bytes]] = {
"image/jpeg": b"\xff\xd8\xff\x00\x00\x00JFIF",
"image/png": b"\211PNG\r\n\032\n",
"image/gif": b"GIF89a",

View file

@ -7,7 +7,7 @@ from typing import TYPE_CHECKING, TypedDict
from typing_extensions import NotRequired
from beets import autotag, config, ui
from beets import config, ui
from beets.autotag import hooks
from beets.util import displayable_path
from beets.util.units import human_seconds_short
@ -17,6 +17,7 @@ if TYPE_CHECKING:
import confuse
from beets import autotag
from beets.autotag.distance import Distance
from beets.library.models import Item
from beets.ui import ColorName
@ -338,13 +339,9 @@ class ChangeRepresentation:
max_width_l = max(get_width(line_tuple[0]) for line_tuple in lines)
max_width_r = max(get_width(line_tuple[1]) for line_tuple in lines)
if (
(max_width_l <= col_width)
and (max_width_r <= col_width)
or (
((max_width_l > col_width) or (max_width_r > col_width))
and ((max_width_l + max_width_r) <= col_width * 2)
)
if ((max_width_l <= col_width) and (max_width_r <= col_width)) or (
((max_width_l > col_width) or (max_width_r > col_width))
and ((max_width_l + max_width_r) <= col_width * 2)
):
# All content fits. Either both maximum widths are below column
# widths, or one of the columns is larger than allowed but the
@ -558,7 +555,7 @@ def penalty_string(distance: Distance, limit: int | None = None) -> str:
penalties.append(key)
if penalties:
if limit and len(penalties) > limit:
penalties = penalties[:limit] + ["..."]
penalties = [*penalties[:limit], "..."]
# Prefix penalty string with U+2260: Not Equal To
penalty_string = f"\u2260 {', '.join(penalties)}"
return ui.colorize("changed", penalty_string)

View file

@ -256,13 +256,11 @@ class TerminalImportSession(importer.ImportSession):
# Add a "dummy" choice for the other baked-in option, for
# duplicate checking.
all_choices = (
[
PromptChoice("a", "Apply", None),
]
+ choices
+ extra_choices
)
all_choices = [
PromptChoice("a", "Apply", None),
*choices,
*extra_choices,
]
# Check for conflicts.
short_letters = [c.short for c in all_choices]
@ -501,7 +499,7 @@ def choose_candidate(
if config["import"]["bell"]:
ui.print_("\a", end="")
sel = ui.input_options(
("Apply", "More candidates") + choice_opts,
("Apply", "More candidates", *choice_opts),
require=require,
default=default,
)

View file

@ -1,18 +1,18 @@
"""The 'move' command: Move/copy files to the library or a new base directory."""
from __future__ import annotations
import os
from typing import TYPE_CHECKING
from beets import logging, ui
from beets.util import (
MoveOperation,
PathLike,
displayable_path,
normpath,
syspath,
)
from beets.util import MoveOperation, displayable_path, normpath, syspath
from .utils import do_query
if TYPE_CHECKING:
from beets.util import PathLike
# Global logger.
log = logging.getLogger("beets")

View file

@ -15,7 +15,7 @@ def write_items(lib, query, pretend, force):
"""Write tag information from the database to the respective files
in the filesystem.
"""
items, albums = do_query(lib, query, False, False)
items, _ = do_query(lib, query, False, False)
for item in items:
# Item deleted?

View file

@ -28,7 +28,7 @@ import sys
import tempfile
import traceback
from collections import Counter
from collections.abc import Callable, Sequence
from collections.abc import Sequence
from contextlib import suppress
from enum import Enum
from functools import cache
@ -44,7 +44,6 @@ from typing import (
Generic,
NamedTuple,
TypeVar,
Union,
cast,
)
@ -54,7 +53,7 @@ import beets
from beets.util import hidden
if TYPE_CHECKING:
from collections.abc import Iterable, Iterator
from collections.abc import Callable, Iterable, Iterator
from logging import Logger
from beets.library import Item
@ -63,8 +62,8 @@ if TYPE_CHECKING:
MAX_FILENAME_LENGTH = 200
WINDOWS_MAGIC_PREFIX = "\\\\?\\"
T = TypeVar("T")
PathLike = Union[str, bytes, Path]
StrPath = Union[str, Path]
StrPath = str | Path
PathLike = StrPath | bytes
Replacements = Sequence[tuple[Pattern[str], str]]
# Here for now to allow for a easy replace later on

View file

@ -24,6 +24,7 @@ import platform
import re
import subprocess
from abc import ABC, abstractmethod
from contextlib import suppress
from enum import Enum
from itertools import chain
from typing import TYPE_CHECKING, Any, ClassVar
@ -268,7 +269,8 @@ class IMBackend(LocalBackend):
# with regards to the height.
# ImageMagick already seems to default to no interlace, but we include
# it here for the sake of explicitness.
cmd: list[str] = self.convert_cmd + [
cmd: list[str] = [
*self.convert_cmd,
syspath(path_in, prefix=False),
"-resize",
f"{maxwidth}x>",
@ -298,7 +300,8 @@ class IMBackend(LocalBackend):
return path_out
def get_size(self, path_in: bytes) -> tuple[int, int] | None:
cmd: list[str] = self.identify_cmd + [
cmd: list[str] = [
*self.identify_cmd,
"-format",
"%w %h",
syspath(path_in, prefix=False),
@ -336,7 +339,8 @@ class IMBackend(LocalBackend):
if not path_out:
path_out = get_temp_filename(__name__, "deinterlace_IM_", path_in)
cmd = self.convert_cmd + [
cmd = [
*self.convert_cmd,
syspath(path_in, prefix=False),
"-interlace",
"none",
@ -351,7 +355,7 @@ class IMBackend(LocalBackend):
return path_in
def get_format(self, path_in: bytes) -> str | None:
cmd = self.identify_cmd + ["-format", "%[magick]", syspath(path_in)]
cmd = [*self.identify_cmd, "-format", "%[magick]", syspath(path_in)]
try:
# Image formats should really only be ASCII strings such as "PNG",
@ -368,7 +372,8 @@ class IMBackend(LocalBackend):
target: bytes,
deinterlaced: bool,
) -> bytes:
cmd = self.convert_cmd + [
cmd = [
*self.convert_cmd,
syspath(source),
*(["-interlace", "none"] if deinterlaced else []),
syspath(target),
@ -400,14 +405,16 @@ class IMBackend(LocalBackend):
# to grayscale and then pipe them into the `compare` command.
# On Windows, ImageMagick doesn't support the magic \\?\ prefix
# on paths, so we pass `prefix=False` to `syspath`.
convert_cmd = self.convert_cmd + [
convert_cmd = [
*self.convert_cmd,
syspath(im2, prefix=False),
syspath(im1, prefix=False),
"-colorspace",
"gray",
"MIFF:-",
]
compare_cmd = self.compare_cmd + [
compare_cmd = [
*self.compare_cmd,
"-define",
"phash:colorspaces=sRGB,HCLp",
"-metric",
@ -487,7 +494,7 @@ class IMBackend(LocalBackend):
("-set", k, v) for k, v in metadata.items()
)
str_file = os.fsdecode(file)
command = self.convert_cmd + [str_file, *assignments, str_file]
command = [*self.convert_cmd, str_file, *assignments, str_file]
util.command_output(command)
@ -828,7 +835,7 @@ class ArtResizer:
"jpeg": "jpg",
}.get(new_format, new_format)
fname, ext = os.path.splitext(path_in)
fname, _ = os.path.splitext(path_in)
path_new = fname + b"." + new_format.encode("utf8")
# allows the exception to propagate, while still making sure a changed
@ -840,7 +847,8 @@ class ArtResizer:
)
finally:
if result_path != path_in:
os.unlink(path_in)
with suppress(OSError):
os.unlink(path_in)
return result_path
@property

View file

@ -192,7 +192,7 @@ def stage(
task: R | T | None = None
while True:
task = yield task
task = func(*(args + (task,)))
task = func(*args, task)
return coro
@ -216,7 +216,7 @@ def mutator_stage(func: Callable[[Unpack[A], T], R]):
task = None
while True:
task = yield task
func(*(args + (task,)))
func(*args, task)
return coro

View file

@ -0,0 +1,290 @@
"""Helpers for communicating with the MusicBrainz webservice.
Provides rate-limited HTTP session and convenience methods to fetch and
normalize API responses.
This module centralizes request handling and response shaping so callers can
work with consistently structured data without embedding HTTP or rate-limit
logic throughout the codebase.
"""
from __future__ import annotations
import operator
from dataclasses import dataclass, field
from functools import cached_property, singledispatchmethod, wraps
from itertools import groupby
from typing import TYPE_CHECKING, Any, Literal, ParamSpec, TypedDict, TypeVar
from requests_ratelimiter import LimiterMixin
from typing_extensions import NotRequired, Unpack
from beets import config, logging
from .requests import RequestHandler, TimeoutAndRetrySession
if TYPE_CHECKING:
from collections.abc import Callable
from requests import Response
from .._typing import JSONDict
# Module-level logger, named after this module (beets' lazy-format logger).
log = logging.getLogger(__name__)
class LimiterTimeoutSession(LimiterMixin, TimeoutAndRetrySession):
    """HTTP session that enforces rate limits.

    ``LimiterMixin`` is listed first so its rate-limiting wrapper runs
    around the timeout/retry behaviour inherited from
    ``TimeoutAndRetrySession``.
    """
# Core MusicBrainz entity names addressable via the WS/2 API; used to type
# the lookup/browse/search helpers below.
Entity = Literal[
    "area",
    "artist",
    "collection",
    "event",
    "genre",
    "instrument",
    "label",
    "place",
    "recording",
    "release",
    "release-group",
    "series",
    "work",
    "url",
]
class LookupKwargs(TypedDict, total=False):
    """Keyword arguments accepted by entity lookup requests."""

    # Relation/sub-entity names joined into the WS/2 ``inc=`` parameter.
    includes: NotRequired[list[str]]
class PagingKwargs(TypedDict, total=False):
    """Pagination keyword arguments shared by browse and search requests."""

    # Maximum number of results per page.
    limit: NotRequired[int]
    # Zero-based index of the first result to return.
    offset: NotRequired[int]
class SearchKwargs(PagingKwargs):
    """Keyword arguments for search requests (paging plus a query string)."""

    # Lucene query string; filled in by MusicBrainzAPI.search().
    query: NotRequired[str]
class BrowseKwargs(LookupKwargs, PagingKwargs, total=False):
    """Keyword arguments common to all browse requests (includes + paging)."""

    pass
class BrowseReleaseGroupsKwargs(BrowseKwargs, total=False):
    """Filters for browsing release groups; each value is an MBID."""

    artist: NotRequired[str]
    collection: NotRequired[str]
    release: NotRequired[str]
class BrowseRecordingsKwargs(BrowseReleaseGroupsKwargs, total=False):
    """Filters for browsing recordings; adds ``work`` to the group filters."""

    work: NotRequired[str]
# Generic helpers used to type the ``require_one_of`` decorator below so the
# wrapped function's signature is preserved for type checkers.
P = ParamSpec("P")
R = TypeVar("R")
def require_one_of(*keys: str) -> Callable[[Callable[P, R]], Callable[P, R]]:
required = frozenset(keys)
def deco(func: Callable[P, R]) -> Callable[P, R]:
@wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:
# kwargs is a real dict at runtime; safe to inspect here
if not required & kwargs.keys():
required_str = ", ".join(sorted(required))
raise ValueError(
f"At least one of {required_str} filter is required"
)
return func(*args, **kwargs)
return wrapper
return deco
@dataclass
class MusicBrainzAPI(RequestHandler):
    """High-level interface to the MusicBrainz WS/2 API.

    Responsibilities:
    - Configure the API host and request rate from application configuration.
    - Offer helpers to fetch common entity types and to run searches.
    - Normalize MusicBrainz responses so relation lists are grouped by target
      type for easier downstream consumption.

    Documentation: https://musicbrainz.org/doc/MusicBrainz_API
    """

    # Both values are derived from the beets "musicbrainz" config section in
    # __post_init__, so they are excluded from the generated __init__.
    api_host: str = field(init=False)
    rate_limit: float = field(init=False)

    def __post_init__(self) -> None:
        """Read host and rate-limit settings from configuration."""
        # Register defaults so the keys read below always resolve.
        mb_config = config["musicbrainz"]
        mb_config.add(
            {
                "host": "musicbrainz.org",
                "https": False,
                "ratelimit": 1,
                "ratelimit_interval": 1,
            }
        )
        hostname = mb_config["host"].as_str()
        if hostname == "musicbrainz.org":
            # The official server: always HTTPS, fixed at 1 request/second.
            self.api_host, self.rate_limit = "https://musicbrainz.org", 1.0
        else:
            https = mb_config["https"].get(bool)
            self.api_host = f"http{'s' if https else ''}://{hostname}"
            # Requests per second = ratelimit / ratelimit_interval.
            self.rate_limit = (
                mb_config["ratelimit"].get(int)
                / mb_config["ratelimit_interval"].as_number()
            )

    @cached_property
    def api_root(self) -> str:
        # Base URL shared by every WS/2 endpoint.
        return f"{self.api_host}/ws/2"

    def create_session(self) -> LimiterTimeoutSession:
        """Create the rate-limited HTTP session used for all requests."""
        return LimiterTimeoutSession(per_second=self.rate_limit)

    def request(self, *args, **kwargs) -> Response:
        """Ensure all requests specify JSON response format by default."""
        kwargs.setdefault("params", {})
        kwargs["params"]["fmt"] = "json"
        return super().request(*args, **kwargs)

    def _get_resource(
        self, resource: str, includes: list[str] | None = None, **kwargs
    ) -> JSONDict:
        """Retrieve and normalize data from the API resource endpoint.

        If requested, includes are appended to the request. The response is
        passed through a normalizer that groups relation entries by their
        target type so that callers receive a consistently structured mapping.
        """
        if includes:
            # WS/2 expects includes joined with '+', e.g. inc=artists+labels.
            kwargs["inc"] = "+".join(includes)
        return self._group_relations(
            self.get_json(f"{self.api_root}/{resource}", params=kwargs)
        )

    def _lookup(
        self, entity: Entity, id_: str, **kwargs: Unpack[LookupKwargs]
    ) -> JSONDict:
        """Fetch a single entity by its MusicBrainz ID."""
        return self._get_resource(f"{entity}/{id_}", **kwargs)

    def _browse(self, entity: Entity, **kwargs) -> list[JSONDict]:
        """Browse entities; results live under the pluralized entity key."""
        return self._get_resource(entity, **kwargs).get(f"{entity}s", [])

    def search(
        self,
        entity: Entity,
        filters: dict[str, str],
        **kwargs: Unpack[SearchKwargs],
    ) -> list[JSONDict]:
        """Search for MusicBrainz entities matching the given filters.

        * Query is constructed by combining the provided filters using AND logic
        * Each filter key-value pair is formatted as 'key:"value"' unless
          - 'key' is empty, in which case only the value is used, '"value"'
          - 'value' is empty, in which case the filter is ignored
        * Values are lowercased and stripped of whitespace.
        """
        query = " AND ".join(
            # filter(None, ...) drops an empty key, yielding just '"value"'.
            ":".join(filter(None, (k, f'"{_v}"')))
            for k, v in filters.items()
            if (_v := v.lower().strip())
        )
        log.debug("Searching for MusicBrainz {}s with: {!r}", entity, query)
        kwargs["query"] = query
        return self._get_resource(entity, **kwargs)[f"{entity}s"]

    def get_release(self, id_: str, **kwargs: Unpack[LookupKwargs]) -> JSONDict:
        """Retrieve a release by its MusicBrainz ID."""
        return self._lookup("release", id_, **kwargs)

    def get_recording(
        self, id_: str, **kwargs: Unpack[LookupKwargs]
    ) -> JSONDict:
        """Retrieve a recording by its MusicBrainz ID."""
        return self._lookup("recording", id_, **kwargs)

    def get_work(self, id_: str, **kwargs: Unpack[LookupKwargs]) -> JSONDict:
        """Retrieve a work by its MusicBrainz ID."""
        return self._lookup("work", id_, **kwargs)

    @require_one_of("artist", "collection", "release", "work")
    def browse_recordings(
        self, **kwargs: Unpack[BrowseRecordingsKwargs]
    ) -> list[JSONDict]:
        """Browse recordings related to the given entities.

        At least one of artist, collection, release, or work must be provided.
        """
        return self._browse("recording", **kwargs)

    @require_one_of("artist", "collection", "release")
    def browse_release_groups(
        self, **kwargs: Unpack[BrowseReleaseGroupsKwargs]
    ) -> list[JSONDict]:
        """Browse release groups related to the given entities.

        At least one of artist, collection, or release must be provided.
        """
        # NOTE(review): unlike _browse(), this indexes the key directly, so a
        # response missing "release-groups" raises KeyError rather than
        # returning [] — confirm whether that asymmetry is intended.
        return self._get_resource("release-group", **kwargs)["release-groups"]

    @singledispatchmethod
    @classmethod
    def _group_relations(cls, data: Any) -> Any:
        """Normalize MusicBrainz 'relations' into type-keyed fields recursively.

        This helper rewrites payloads that use a generic 'relations' list into
        a structure that is easier to consume downstream. When a mapping
        contains 'relations', those entries are regrouped by their 'target-type'
        and stored under keys like '<target-type>-relations'. The original
        'relations' key is removed to avoid ambiguous access patterns.

        The transformation is applied recursively so that nested objects and
        sequences are normalized consistently, while non-container values are
        left unchanged.
        """
        # Base case for the single dispatch: scalars pass through untouched.
        return data

    @_group_relations.register(list)
    @classmethod
    def _(cls, data: list[Any]) -> list[Any]:
        # Normalize every element of a sequence.
        return [cls._group_relations(i) for i in data]

    @_group_relations.register(dict)
    @classmethod
    def _(cls, data: JSONDict) -> JSONDict:
        # Iterate over a snapshot since the mapping is mutated in the loop.
        for k, v in list(data.items()):
            if k == "relations":
                get_target_type = operator.methodcaller("get", "target-type")
                # groupby needs its input pre-sorted by the same key.
                for target_type, group in groupby(
                    sorted(v, key=get_target_type), get_target_type
                ):
                    # Drop the now-redundant 'target-type' from each entry.
                    relations = [
                        {k: v for k, v in item.items() if k != "target-type"}
                        for item in group
                    ]
                    data[f"{target_type}-relations"] = cls._group_relations(
                        relations
                    )
                data.pop("relations")
            else:
                data[k] = cls._group_relations(v)
        return data
class MusicBrainzAPIMixin:
    """Mixin that provides a cached MusicBrainzAPI helper instance."""

    @cached_property
    def mb_api(self) -> MusicBrainzAPI:
        # Lazily construct the API client once per instance and reuse it.
        return MusicBrainzAPI()

View file

@ -67,7 +67,7 @@ class TimeoutAndRetrySession(requests.Session, metaclass=SingletonMeta):
* default beets User-Agent header
* default request timeout
* automatic retries on transient connection errors
* automatic retries on transient connection or server errors
* raises exceptions for HTTP error status codes
"""
@ -75,7 +75,18 @@ class TimeoutAndRetrySession(requests.Session, metaclass=SingletonMeta):
super().__init__(*args, **kwargs)
self.headers["User-Agent"] = f"beets/{__version__} https://beets.io/"
retry = Retry(connect=2, total=2, backoff_factor=1)
retry = Retry(
connect=2,
total=2,
backoff_factor=1,
# Retry on server errors
status_forcelist=[
HTTPStatus.INTERNAL_SERVER_ERROR,
HTTPStatus.BAD_GATEWAY,
HTTPStatus.SERVICE_UNAVAILABLE,
HTTPStatus.GATEWAY_TIMEOUT,
],
)
adapter = HTTPAdapter(max_retries=retry)
self.mount("https://", adapter)
self.mount("http://", adapter)
@ -102,18 +113,20 @@ class RequestHandler:
subclasses.
Usage:
Subclass and override :class:`RequestHandler.session_type`,
Subclass and override :class:`RequestHandler.create_session`,
:class:`RequestHandler.explicit_http_errors` or
:class:`RequestHandler.status_to_error()` to customize behavior.
Use
* :class:`RequestHandler.get_json()` to get JSON response data
* :class:`RequestHandler.get()` to get HTTP response object
* :class:`RequestHandler.request()` to invoke arbitrary HTTP methods
Use
Feel free to define common methods that are used in multiple plugins.
- :class:`RequestHandler.get_json()` to get JSON response data
- :class:`RequestHandler.get()` to get HTTP response object
- :class:`RequestHandler.request()` to invoke arbitrary HTTP methods
Feel free to define common methods that are used in multiple plugins.
"""
#: List of custom exceptions to be raised for specific status codes.
explicit_http_errors: ClassVar[list[type[BeetsHTTPError]]] = [
HTTPNotFoundError
]
@ -127,7 +140,6 @@ class RequestHandler:
@cached_property
def session(self) -> TimeoutAndRetrySession:
"""Lazily initialize and cache the HTTP session."""
return self.create_session()
def status_to_error(
@ -155,6 +167,7 @@ class RequestHandler:
except requests.exceptions.HTTPError as e:
if beets_error := self.status_to_error(e.response.status_code):
raise beets_error(response=e.response) from e
raise
def request(self, *args, **kwargs) -> requests.Response:
@ -170,6 +183,14 @@ class RequestHandler:
"""Perform HTTP GET request with automatic error handling."""
return self.request("get", *args, **kwargs)
def put(self, *args, **kwargs) -> requests.Response:
"""Perform HTTP PUT request with automatic error handling."""
return self.request("put", *args, **kwargs)
def delete(self, *args, **kwargs) -> requests.Response:
"""Perform HTTP DELETE request with automatic error handling."""
return self.request("delete", *args, **kwargs)
def get_json(self, *args, **kwargs):
"""Fetch and parse JSON data from an HTTP endpoint."""
return self.get(*args, **kwargs).json()

View file

@ -15,6 +15,7 @@
"""Fetch various AcousticBrainz metadata using MBID."""
from collections import defaultdict
from typing import ClassVar
import requests
@ -55,7 +56,7 @@ ABSCHEME = {
class AcousticPlugin(plugins.BeetsPlugin):
item_types = {
item_types: ClassVar[dict[str, types.Type]] = {
"average_loudness": types.Float(6),
"chords_changes_rate": types.Float(6),
"chords_key": types.STRING,

View file

@ -14,11 +14,17 @@
"""Adds an album template field for formatted album types."""
from beets.library import Album
from __future__ import annotations
from typing import TYPE_CHECKING
from beets.plugins import BeetsPlugin
from .musicbrainz import VARIOUS_ARTISTS_ID
if TYPE_CHECKING:
from beets.library import Album
class AlbumTypesPlugin(BeetsPlugin):
"""Adds an album template field for formatted album types."""

View file

@ -14,12 +14,13 @@
"""An AURA server using Flask."""
from __future__ import annotations
import os
import re
from collections.abc import Mapping
from dataclasses import dataclass
from mimetypes import guess_type
from typing import ClassVar
from typing import TYPE_CHECKING, ClassVar
from flask import (
Blueprint,
@ -40,12 +41,17 @@ from beets.dbcore.query import (
NotQuery,
RegexpQuery,
SlowFieldSort,
SQLiteType,
)
from beets.library import Album, Item, LibModel, Library
from beets.library import Album, Item
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, _open_library
if TYPE_CHECKING:
from collections.abc import Mapping
from beets.dbcore.query import SQLiteType
from beets.library import LibModel, Library
# Constants
# AURA server information

View file

@ -26,7 +26,7 @@ import sys
import time
import traceback
from string import Template
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, ClassVar
import beets
import beets.ui
@ -1037,7 +1037,7 @@ class Command:
raise BPDError(ERROR_PERMISSION, "insufficient privileges")
try:
args = [conn] + self.args
args = [conn, *self.args]
results = func(*args)
if results:
for data in results:
@ -1344,7 +1344,7 @@ class Server(BaseServer):
# Searching.
tagtype_map = {
tagtype_map: ClassVar[dict[str, str]] = {
"Artist": "artist",
"ArtistSort": "artist_sort",
"Album": "album",

View file

@ -37,7 +37,7 @@ except ValueError as e:
# makes it so the test collector functions as intended.
raise ImportError from e
from gi.repository import GLib, Gst # noqa: E402
from gi.repository import GLib, Gst
Gst.init(None)
@ -115,7 +115,7 @@ class GstPlayer:
elif message.type == Gst.MessageType.ERROR:
# error
self.player.set_state(Gst.State.NULL)
err, debug = message.parse_error()
err, _ = message.parse_error()
print(f"Error: {err}")
self.playing = False
@ -205,7 +205,7 @@ class GstPlayer:
def seek(self, position):
"""Seeks to position (in seconds)."""
cur_pos, cur_len = self.time()
_, cur_len = self.time()
if position > cur_len:
self.stop()
return

View file

@ -73,7 +73,7 @@ class BPSyncPlugin(BeetsPlugin):
"""Retrieve and apply info from the autotagger for items matched by
query.
"""
for item in lib.items(query + ["singleton:true"]):
for item in lib.items([*query, "singleton:true"]):
if not item.mb_trackid:
self._log.info(
"Skipping singleton with no mb_trackid: {}", item

View file

@ -16,20 +16,26 @@
autotagger. Requires the pyacoustid library.
"""
from __future__ import annotations
import re
from collections import defaultdict
from collections.abc import Iterable
from functools import cached_property, partial
from typing import TYPE_CHECKING
import acoustid
import confuse
from beets import config, ui, util
from beets.autotag.distance import Distance
from beets.autotag.hooks import TrackInfo
from beets.metadata_plugins import MetadataSourcePlugin
from beetsplug.musicbrainz import MusicBrainzPlugin
if TYPE_CHECKING:
from collections.abc import Iterable
from beets.autotag.hooks import TrackInfo
API_KEY = "1vOwZtEn"
SCORE_THRESH = 0.5
TRACK_ID_WEIGHT = 10.0

View file

@ -274,11 +274,15 @@ class ConvertPlugin(BeetsPlugin):
pretend,
hardlink,
link,
playlist,
_,
force,
) = self._get_opts_and_config(empty_opts)
items = task.imported_items()
# Filter items based on should_transcode function
items = [item for item in items if should_transcode(item, fmt)]
self._parallel_convert(
dest,
False,

View file

@ -18,29 +18,26 @@ from __future__ import annotations
import collections
import time
from typing import TYPE_CHECKING, Literal
from typing import TYPE_CHECKING, ClassVar, Literal
import requests
from beets import ui
from beets.autotag import AlbumInfo, TrackInfo
from beets.dbcore import types
from beets.metadata_plugins import (
IDResponse,
SearchApiMetadataSourcePlugin,
SearchFilter,
)
from beets.metadata_plugins import IDResponse, SearchApiMetadataSourcePlugin
if TYPE_CHECKING:
from collections.abc import Sequence
from beets.library import Item, Library
from beets.metadata_plugins import SearchFilter
from ._typing import JSONDict
class DeezerPlugin(SearchApiMetadataSourcePlugin[IDResponse]):
item_types = {
item_types: ClassVar[dict[str, types.Type]] = {
"deezer_track_rank": types.INTEGER,
"deezer_track_id": types.INTEGER,
"deezer_updated": types.DATE,

View file

@ -27,13 +27,12 @@ import time
import traceback
from functools import cache
from string import ascii_lowercase
from typing import TYPE_CHECKING, cast
from typing import TYPE_CHECKING
import confuse
from discogs_client import Client, Master, Release
from discogs_client.exceptions import DiscogsAPIError
from requests.exceptions import ConnectionError
from typing_extensions import NotRequired, TypedDict
import beets
import beets.ui
@ -42,15 +41,20 @@ from beets.autotag.distance import string_dist
from beets.autotag.hooks import AlbumInfo, TrackInfo
from beets.metadata_plugins import MetadataSourcePlugin
from .states import DISAMBIGUATION_RE, ArtistState, TracklistState
if TYPE_CHECKING:
from collections.abc import Callable, Iterable, Sequence
from beets.library import Item
from .types import ReleaseFormat, Track
USER_AGENT = f"beets/{beets.__version__} +https://beets.io/"
API_KEY = "rAzVUQYRaoFjeBjyWuWZ"
API_SECRET = "plxtUTqoCzwxZpqdPysCwGuBSmZNdZVy"
# Exceptions that discogs_client should really handle but does not.
CONNECTION_ERRORS = (
ConnectionError,
@ -60,7 +64,6 @@ CONNECTION_ERRORS = (
DiscogsAPIError,
)
TRACK_INDEX_RE = re.compile(
r"""
(.*?) # medium: everything before medium_index.
@ -76,50 +79,6 @@ TRACK_INDEX_RE = re.compile(
re.VERBOSE,
)
DISAMBIGUATION_RE = re.compile(r" \(\d+\)")
class ReleaseFormat(TypedDict):
name: str
qty: int
descriptions: list[str] | None
class Artist(TypedDict):
name: str
anv: str
join: str
role: str
tracks: str
id: str
resource_url: str
class Track(TypedDict):
position: str
type_: str
title: str
duration: str
artists: list[Artist]
extraartists: NotRequired[list[Artist]]
class TrackWithSubtracks(Track):
sub_tracks: list[TrackWithSubtracks]
class IntermediateTrackInfo(TrackInfo):
"""Allows work with string mediums from
get_track_info"""
def __init__(
self,
medium_str: str | None,
**kwargs,
) -> None:
self.medium_str = medium_str
super().__init__(**kwargs)
class DiscogsPlugin(MetadataSourcePlugin):
def __init__(self):
@ -277,7 +236,6 @@ class DiscogsPlugin(MetadataSourcePlugin):
for track in album.tracks:
if track.track_id == track_id:
return track
return None
def get_albums(self, query: str) -> Iterable[AlbumInfo]:
@ -343,25 +301,6 @@ class DiscogsPlugin(MetadataSourcePlugin):
return media, albumtype
def get_artist_with_anv(
self, artists: list[Artist], use_anv: bool = False
) -> tuple[str, str | None]:
"""Iterates through a discogs result, fetching data
if the artist anv is to be used, maps that to the name.
Calls the parent class get_artist method."""
artist_list: list[dict[str | int, str]] = []
for artist_data in artists:
a: dict[str | int, str] = {
"name": artist_data["name"],
"id": artist_data["id"],
"join": artist_data.get("join", ""),
}
if use_anv and (anv := artist_data.get("anv", "")):
a["name"] = anv
artist_list.append(a)
artist, artist_id = self.get_artist(artist_list, join_key="join")
return self.strip_disambiguation(artist), artist_id
def get_album_info(self, result: Release) -> AlbumInfo | None:
"""Returns an AlbumInfo object for a discogs Release object."""
# Explicitly reload the `Release` fields, as they might not be yet
@ -391,11 +330,10 @@ class DiscogsPlugin(MetadataSourcePlugin):
return None
artist_data = [a.data for a in result.artists]
album_artist, album_artist_id = self.get_artist_with_anv(artist_data)
album_artist_anv, _ = self.get_artist_with_anv(
artist_data, use_anv=True
# Information for the album artist
albumartist = ArtistState.from_config(
self.config, artist_data, for_album_artist=True
)
artist_credit = album_artist_anv
album = re.sub(r" +", " ", result.title)
album_id = result.data["id"]
@ -405,19 +343,13 @@ class DiscogsPlugin(MetadataSourcePlugin):
# each make an API call just to get the same data back.
tracks = self.get_tracks(
result.data["tracklist"],
(album_artist, album_artist_anv, album_artist_id),
ArtistState.from_config(self.config, artist_data),
)
# Assign ANV to the proper fields for tagging
if not self.config["anv"]["artist_credit"]:
artist_credit = album_artist
if self.config["anv"]["album_artist"]:
album_artist = album_artist_anv
# Extract information for the optional AlbumInfo fields, if possible.
va = result.data["artists"][0].get("name", "").lower() == "various"
va = albumartist.artist == config["va_name"].as_str()
year = result.data.get("year")
mediums = [t.medium for t in tracks]
mediums = [t["medium"] for t in tracks]
country = result.data.get("country")
data_url = result.data.get("uri")
style = self.format(result.data.get("styles"))
@ -447,11 +379,7 @@ class DiscogsPlugin(MetadataSourcePlugin):
cover_art_url = self.select_cover_art(result)
# Additional cleanups
# (various artists name, catalog number, media, disambiguation).
if va:
va_name = config["va_name"].as_str()
album_artist = va_name
artist_credit = va_name
# (catalog number, media, disambiguation).
if catalogno == "none":
catalogno = None
# Explicitly set the `media` for the tracks, since it is expected by
@ -474,9 +402,7 @@ class DiscogsPlugin(MetadataSourcePlugin):
return AlbumInfo(
album=album,
album_id=album_id,
artist=album_artist,
artist_credit=artist_credit,
artist_id=album_artist_id,
**albumartist.info, # Unpacks values to satisfy the keyword arguments
tracks=tracks,
albumtype=albumtype,
va=va,
@ -494,7 +420,7 @@ class DiscogsPlugin(MetadataSourcePlugin):
data_url=data_url,
discogs_albumid=discogs_albumid,
discogs_labelid=labelid,
discogs_artistid=album_artist_id,
discogs_artistid=albumartist.artist_id,
cover_art_url=cover_art_url,
)
@ -516,63 +442,22 @@ class DiscogsPlugin(MetadataSourcePlugin):
else:
return None
def _process_clean_tracklist(
self,
clean_tracklist: list[Track],
album_artist_data: tuple[str, str, str | None],
) -> tuple[list[TrackInfo], dict[int, str], int, list[str], list[str]]:
# Distinct works and intra-work divisions, as defined by index tracks.
tracks: list[TrackInfo] = []
index_tracks = {}
index = 0
divisions: list[str] = []
next_divisions: list[str] = []
for track in clean_tracklist:
# Only real tracks have `position`. Otherwise, it's an index track.
if track["position"]:
index += 1
if next_divisions:
# End of a block of index tracks: update the current
# divisions.
divisions += next_divisions
del next_divisions[:]
track_info = self.get_track_info(
track, index, divisions, album_artist_data
)
track_info.track_alt = track["position"]
tracks.append(track_info)
else:
next_divisions.append(track["title"])
# We expect new levels of division at the beginning of the
# tracklist (and possibly elsewhere).
try:
divisions.pop()
except IndexError:
pass
index_tracks[index + 1] = track["title"]
return tracks, index_tracks, index, divisions, next_divisions
def get_tracks(
self,
tracklist: list[Track],
album_artist_data: tuple[str, str, str | None],
albumartistinfo: ArtistState,
) -> list[TrackInfo]:
"""Returns a list of TrackInfo objects for a discogs tracklist."""
try:
clean_tracklist: list[Track] = self.coalesce_tracks(
cast(list[TrackWithSubtracks], tracklist)
)
clean_tracklist: list[Track] = self._coalesce_tracks(tracklist)
except Exception as exc:
# FIXME: this is an extra precaution for making sure there are no
# side effects after #2222. It should be removed after further
# testing.
self._log.debug("{}", traceback.format_exc())
self._log.error("uncaught exception in coalesce_tracks: {}", exc)
self._log.error("uncaught exception in _coalesce_tracks: {}", exc)
clean_tracklist = tracklist
processed = self._process_clean_tracklist(
clean_tracklist, album_artist_data
)
tracks, index_tracks, index, divisions, next_divisions = processed
t = TracklistState.build(self, clean_tracklist, albumartistinfo)
# Fix up medium and medium_index for each track. Discogs position is
# unreliable, but tracks are in order.
medium = None
@ -581,32 +466,36 @@ class DiscogsPlugin(MetadataSourcePlugin):
# If a medium has two sides (ie. vinyl or cassette), each pair of
# consecutive sides should belong to the same medium.
if all([track.medium_str is not None for track in tracks]):
m = sorted({track.medium_str.lower() for track in tracks})
if all([medium is not None for medium in t.mediums]):
m = sorted(
{medium.lower() if medium else "" for medium in t.mediums}
)
# If all track.medium are single consecutive letters, assume it is
# a 2-sided medium.
if "".join(m) in ascii_lowercase:
sides_per_medium = 2
for track in tracks:
for i, track in enumerate(t.tracks):
# Handle special case where a different medium does not indicate a
# new disc, when there is no medium_index and the ordinal of medium
# is not sequential. For example, I, II, III, IV, V. Assume these
# are the track index, not the medium.
# side_count is the number of mediums or medium sides (in the case
# of two-sided mediums) that were seen before.
medium_str = t.mediums[i]
medium_index = t.medium_indices[i]
medium_is_index = (
track.medium_str
and not track.medium_index
medium_str
and not medium_index
and (
len(track.medium_str) != 1
len(medium_str) != 1
or
# Not within standard incremental medium values (A, B, C, ...).
ord(track.medium_str) - 64 != side_count + 1
ord(medium_str) - 64 != side_count + 1
)
)
if not medium_is_index and medium != track.medium_str:
if not medium_is_index and medium != medium_str:
side_count += 1
if sides_per_medium == 2:
if side_count % sides_per_medium:
@ -617,7 +506,7 @@ class DiscogsPlugin(MetadataSourcePlugin):
# Medium changed. Reset index_count.
medium_count += 1
index_count = 0
medium = track.medium_str
medium = medium_str
index_count += 1
medium_count = 1 if medium_count == 0 else medium_count
@ -625,69 +514,25 @@ class DiscogsPlugin(MetadataSourcePlugin):
# Get `disctitle` from Discogs index tracks. Assume that an index track
# before the first track of each medium is a disc title.
for track in tracks:
for track in t.tracks:
if track.medium_index == 1:
if track.index in index_tracks:
disctitle = index_tracks[track.index]
if track.index in t.index_tracks:
disctitle = t.index_tracks[track.index]
else:
disctitle = None
track.disctitle = disctitle
return cast(list[TrackInfo], tracks)
return t.tracks
def coalesce_tracks(
self, raw_tracklist: list[TrackWithSubtracks]
) -> list[Track]:
def _coalesce_tracks(self, raw_tracklist: list[Track]) -> list[Track]:
"""Pre-process a tracklist, merging subtracks into a single track. The
title for the merged track is the one from the previous index track,
if present; otherwise it is a combination of the subtracks titles.
"""
def add_merged_subtracks(
tracklist: list[TrackWithSubtracks],
subtracks: list[TrackWithSubtracks],
) -> None:
"""Modify `tracklist` in place, merging a list of `subtracks` into
a single track into `tracklist`."""
# Calculate position based on first subtrack, without subindex.
idx, medium_idx, sub_idx = self.get_track_index(
subtracks[0]["position"]
)
position = f"{idx or ''}{medium_idx or ''}"
if tracklist and not tracklist[-1]["position"]:
# Assume the previous index track contains the track title.
if sub_idx:
# "Convert" the track title to a real track, discarding the
# subtracks assuming they are logical divisions of a
# physical track (12.2.9 Subtracks).
tracklist[-1]["position"] = position
else:
# Promote the subtracks to real tracks, discarding the
# index track, assuming the subtracks are physical tracks.
index_track = tracklist.pop()
# Fix artists when they are specified on the index track.
if index_track.get("artists"):
for subtrack in subtracks:
if not subtrack.get("artists"):
subtrack["artists"] = index_track["artists"]
# Concatenate index with track title when index_tracks
# option is set
if self.config["index_tracks"]:
for subtrack in subtracks:
subtrack["title"] = (
f"{index_track['title']}: {subtrack['title']}"
)
tracklist.extend(subtracks)
else:
# Merge the subtracks, pick a title, and append the new track.
track = subtracks[0].copy()
track["title"] = " / ".join([t["title"] for t in subtracks])
tracklist.append(track)
# Pre-process the tracklist, trying to identify subtracks.
subtracks: list[TrackWithSubtracks] = []
tracklist: list[TrackWithSubtracks] = []
subtracks: list[Track] = []
tracklist: list[Track] = []
prev_subindex = ""
for track in raw_tracklist:
# Regular subtrack (track with subindex).
@ -699,7 +544,7 @@ class DiscogsPlugin(MetadataSourcePlugin):
subtracks.append(track)
else:
# Subtrack part of a new group (..., 1.3, *2.1*, ...).
add_merged_subtracks(tracklist, subtracks)
self._add_merged_subtracks(tracklist, subtracks)
subtracks = [track]
prev_subindex = subindex.rjust(len(raw_tracklist))
continue
@ -708,21 +553,64 @@ class DiscogsPlugin(MetadataSourcePlugin):
if not track["position"] and "sub_tracks" in track:
# Append the index track, assuming it contains the track title.
tracklist.append(track)
add_merged_subtracks(tracklist, track["sub_tracks"])
self._add_merged_subtracks(tracklist, track["sub_tracks"])
continue
# Regular track or index track without nested sub_tracks.
if subtracks:
add_merged_subtracks(tracklist, subtracks)
self._add_merged_subtracks(tracklist, subtracks)
subtracks = []
prev_subindex = ""
tracklist.append(track)
# Merge and add the remaining subtracks, if any.
if subtracks:
add_merged_subtracks(tracklist, subtracks)
self._add_merged_subtracks(tracklist, subtracks)
return cast(list[Track], tracklist)
return tracklist
def _add_merged_subtracks(
    self,
    tracklist: list[Track],
    subtracks: list[Track],
) -> None:
    """Modify `tracklist` in place, merging `subtracks` into a single
    track that is appended to (or folded into) `tracklist`.

    If the preceding entry in `tracklist` is an index track (no
    `position`), it either absorbs the subtracks' position or is
    replaced by them, depending on whether the subtracks carry a
    subindex. Otherwise the subtracks are merged into one track whose
    title joins all subtrack titles.
    """
    # Calculate position based on first subtrack, without subindex.
    idx, medium_idx, sub_idx = self.get_track_index(
        subtracks[0]["position"]
    )
    position = f"{idx or ''}{medium_idx or ''}"
    if tracklist and not tracklist[-1]["position"]:
        # Assume the previous index track contains the track title.
        if sub_idx:
            # "Convert" the track title to a real track, discarding the
            # subtracks assuming they are logical divisions of a
            # physical track (12.2.9 Subtracks).
            tracklist[-1]["position"] = position
        else:
            # Promote the subtracks to real tracks, discarding the
            # index track, assuming the subtracks are physical tracks.
            index_track = tracklist.pop()
            # Fix artists when they are specified on the index track.
            if index_track.get("artists"):
                for subtrack in subtracks:
                    if not subtrack.get("artists"):
                        subtrack["artists"] = index_track["artists"]
            # Concatenate index with track title when the index_tracks
            # config option is set.
            if self.config["index_tracks"]:
                for subtrack in subtracks:
                    subtrack["title"] = (
                        f"{index_track['title']}: {subtrack['title']}"
                    )
            tracklist.extend(subtracks)
    else:
        # Merge the subtracks, pick a title, and append the new track.
        track = subtracks[0].copy()
        track["title"] = " / ".join([t["title"] for t in subtracks])
        tracklist.append(track)
def strip_disambiguation(self, text: str) -> str:
"""Removes discogs specific disambiguations from a string.
@ -737,17 +625,10 @@ class DiscogsPlugin(MetadataSourcePlugin):
track: Track,
index: int,
divisions: list[str],
album_artist_data: tuple[str, str, str | None],
) -> IntermediateTrackInfo:
albumartistinfo: ArtistState,
) -> tuple[TrackInfo, str | None, str | None]:
"""Returns a TrackInfo object for a discogs track."""
artist, artist_anv, artist_id = album_artist_data
artist_credit = artist_anv
if not self.config["anv"]["artist_credit"]:
artist_credit = artist
if self.config["anv"]["artist"]:
artist = artist_anv
title = track["title"]
if self.config["index_tracks"]:
prefix = ", ".join(divisions)
@ -756,44 +637,26 @@ class DiscogsPlugin(MetadataSourcePlugin):
track_id = None
medium, medium_index, _ = self.get_track_index(track["position"])
# If artists are found on the track, we will use those instead
if artists := track.get("artists", []):
artist, artist_id = self.get_artist_with_anv(
artists, self.config["anv"]["artist"]
)
artist_credit, _ = self.get_artist_with_anv(
artists, self.config["anv"]["artist_credit"]
)
length = self.get_track_length(track["duration"])
# If artists are found on the track, we will use those instead
artistinfo = ArtistState.from_config(
self.config,
[
*(track.get("artists") or albumartistinfo.raw_artists),
*track.get("extraartists", []),
],
)
# Add featured artists
if extraartists := track.get("extraartists", []):
featured_list = [
artist
for artist in extraartists
if "Featuring" in artist["role"]
]
featured, _ = self.get_artist_with_anv(
featured_list, self.config["anv"]["artist"]
)
featured_credit, _ = self.get_artist_with_anv(
featured_list, self.config["anv"]["artist_credit"]
)
if featured:
artist += f" {self.config['featured_string']} {featured}"
artist_credit += (
f" {self.config['featured_string']} {featured_credit}"
)
return IntermediateTrackInfo(
title=title,
track_id=track_id,
artist_credit=artist_credit,
artist=artist,
artist_id=artist_id,
length=length,
index=index,
medium_str=medium,
medium_index=medium_index,
return (
TrackInfo(
title=title,
track_id=track_id,
**artistinfo.info,
length=length,
index=index,
),
medium,
medium_index,
)
@staticmethod

237
beetsplug/discogs/states.py Normal file
View file

@ -0,0 +1,237 @@
# This file is part of beets.
# Copyright 2025, Sarunas Nejus, Henry Oberholtzer.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Dataclasses for managing artist credits and tracklists from Discogs."""
from __future__ import annotations
import re
from dataclasses import asdict, dataclass, field
from functools import cached_property
from typing import TYPE_CHECKING, NamedTuple
from beets import config
from .types import ArtistInfo
if TYPE_CHECKING:
from confuse import ConfigView
from beets.autotag.hooks import TrackInfo
from . import DiscogsPlugin
from .types import Artist, Track, TracklistInfo
DISAMBIGUATION_RE = re.compile(r" \(\d+\)")
@dataclass
class ArtistState:
    """Represent Discogs artist credits.

    This object centralizes the plugin's policy for which Discogs artist
    fields to prefer (name vs. ANV), how to treat 'Various', how to format
    join phrases, and how to separate featured artists. It exposes both
    per-artist components and fully joined strings for common tag targets
    like 'artist' and 'artist_credit'.
    """

    class ValidArtist(NamedTuple):
        """A normalized, render-ready artist entry extracted from Discogs data.

        Instances represent the subset of Discogs artist information needed
        for tagging, including the join token following the artist and
        whether the entry is considered a featured appearance.
        """

        id: str
        name: str
        credit: str
        join: str
        is_feat: bool

        def get_artist(self, property_name: str) -> str:
            """Return the requested display field with its trailing join token.

            The join token is normalized so commas become ', ' and other
            join phrases are surrounded with spaces, producing a single
            fragment that can be concatenated to form a full artist string.
            """
            join = {",": ", ", "": ""}.get(self.join, f" {self.join} ")
            return f"{getattr(self, property_name)}{join}"

    # Raw artist payloads as received from Discogs, in credit order.
    raw_artists: list[Artist]
    # Prefer the ANV over the canonical name for the 'artist' field.
    use_anv: bool
    # Prefer the ANV over the canonical name for the 'artist_credit' field.
    use_credit_anv: bool
    # Marker inserted before featured artists, e.g. "feat.".
    featured_string: str
    # Strip Discogs numeric disambiguation suffixes like 'Name (5)'.
    should_strip_disambiguation: bool

    @property
    def info(self) -> ArtistInfo:
        """Expose the state in the shape expected by downstream tag mapping."""
        return {k: getattr(self, k) for k in ArtistInfo.__annotations__}  # type: ignore[return-value]

    def strip_disambiguation(self, text: str) -> str:
        """Strip Discogs disambiguation suffixes from an artist or label string.

        This removes Discogs-specific numeric suffixes like 'Name (5)' and
        can be applied to multi-artist strings as well (e.g., 'A (1) & B (2)').
        When the feature is disabled, the input is returned unchanged.
        """
        if self.should_strip_disambiguation:
            return DISAMBIGUATION_RE.sub("", text)
        return text

    @cached_property
    def valid_artists(self) -> list[ValidArtist]:
        """Build the ordered, filtered list of artists used for rendering.

        The resulting list normalizes Discogs entries by:

        - substituting the configured 'Various Artists' name when Discogs
          uses 'Various'
        - choosing between name and ANV according to plugin settings
        - excluding non-empty roles unless they indicate a featured
          appearance
        - capturing join tokens so the original credit formatting is
          preserved
        """
        va_name = config["va_name"].as_str()
        # The walrus assignments in the filter below both bind the values
        # used in the constructor call and gate which entries are kept:
        # an entry survives only if it has a (substituted) name and either
        # an empty role or a role containing 'featuring'.
        return [
            self.ValidArtist(
                str(a["id"]),
                self.strip_disambiguation(anv if self.use_anv else name),
                self.strip_disambiguation(anv if self.use_credit_anv else name),
                a["join"].strip(),
                is_feat,
            )
            for a in self.raw_artists
            if (
                (name := va_name if a["name"] == "Various" else a["name"])
                and (anv := a["anv"] or name)
                and (
                    (is_feat := ("featuring" in a["role"].lower()))
                    or not a["role"]
                )
            )
        ]

    @property
    def artists_ids(self) -> list[str]:
        """Return Discogs artist IDs for all valid artists, preserving order."""
        return [a.id for a in self.valid_artists]

    @property
    def artist_id(self) -> str:
        """Return the primary Discogs artist ID.

        NOTE(review): raises IndexError when no valid artists remain after
        filtering — confirm callers only use this on non-empty credits.
        """
        return self.artists_ids[0]

    @property
    def artists(self) -> list[str]:
        """Return the per-artist display names used for the 'artist' field."""
        return [a.name for a in self.valid_artists]

    @property
    def artists_credit(self) -> list[str]:
        """Return the per-artist display names used for the credit field."""
        return [a.credit for a in self.valid_artists]

    @property
    def artist(self) -> str:
        """Return the fully rendered artist string using display names."""
        return self.join_artists("name")

    @property
    def artist_credit(self) -> str:
        """Return the fully rendered artist credit string."""
        return self.join_artists("credit")

    def join_artists(self, property_name: str) -> str:
        """Render a single artist string with join phrases and featured artists.

        Non-featured artists are concatenated using their join tokens.
        Featured artists are appended after the configured 'featured'
        marker, preserving Discogs order while keeping featured credits
        separate from the main artist string.
        """
        non_featured = [a for a in self.valid_artists if not a.is_feat]
        featured = [a for a in self.valid_artists if a.is_feat]
        artist = "".join(a.get_artist(property_name) for a in non_featured)
        if featured:
            # Avoid inserting a second marker when the joined main-artist
            # string already contains a "feat"-style join phrase.
            if "feat" not in artist:
                artist += f" {self.featured_string} "
            # NOTE(review): each featured fragment keeps its own trailing
            # join token from get_artist(); with non-empty joins the ', '
            # separator may double up — confirm against real payloads.
            artist += ", ".join(a.get_artist(property_name) for a in featured)
        return artist

    @classmethod
    def from_config(
        cls,
        config: ConfigView,
        artists: list[Artist],
        for_album_artist: bool = False,
    ) -> ArtistState:
        """Build an ArtistState from the plugin config and raw artist data.

        When `for_album_artist` is set, the 'anv.album_artist' option
        controls ANV preference instead of 'anv.artist'.
        """
        return cls(
            artists,
            config["anv"]["album_artist" if for_album_artist else "artist"].get(
                bool
            ),
            config["anv"]["artist_credit"].get(bool),
            config["featured_string"].as_str(),
            config["strip_disambiguation"].get(bool),
        )
@dataclass
class TracklistState:
    """Accumulate per-track state while walking a cleaned Discogs tracklist.

    Built via :meth:`build`, which distinguishes real tracks (those with a
    `position`) from index tracks (section headers / disc titles) and
    records mediums and medium indices in parallel with the tracks.
    """

    # 1-based counter of real tracks seen so far.
    index: int = 0
    # Maps a real-track index to the title of the index track preceding it.
    index_tracks: dict[int, str] = field(default_factory=dict)
    # TrackInfo objects for real tracks, in tracklist order.
    tracks: list[TrackInfo] = field(default_factory=list)
    # Work divisions currently in effect (from earlier index tracks).
    divisions: list[str] = field(default_factory=list)
    # Index-track titles collected since the last real track.
    next_divisions: list[str] = field(default_factory=list)
    # Per-track medium letter/name (parallel to `tracks`), None if absent.
    mediums: list[str | None] = field(default_factory=list)
    # Per-track medium index (parallel to `tracks`), None if absent.
    medium_indices: list[str | None] = field(default_factory=list)

    @property
    def info(self) -> TracklistInfo:
        """Expose the accumulated state as a plain dict."""
        return asdict(self)  # type: ignore[return-value]

    @classmethod
    def build(
        cls,
        plugin: DiscogsPlugin,
        clean_tracklist: list[Track],
        albumartistinfo: ArtistState,
    ) -> TracklistState:
        """Walk `clean_tracklist` and return the populated state.

        Delegates per-track parsing to `plugin.get_track_info`; index
        tracks (entries without a `position`) only update the division
        bookkeeping and `index_tracks` mapping.
        """
        state = cls()
        for track in clean_tracklist:
            # Only real tracks have `position`; otherwise it's an index track.
            if track["position"]:
                state.index += 1
                if state.next_divisions:
                    # End of a block of index tracks: update the current
                    # divisions.
                    state.divisions += state.next_divisions
                    state.next_divisions.clear()
                track_info, medium, medium_index = plugin.get_track_info(
                    track, state.index, state.divisions, albumartistinfo
                )
                track_info.track_alt = track["position"]
                state.tracks.append(track_info)
                state.mediums.append(medium or None)
                state.medium_indices.append(medium_index or None)
            else:
                state.next_divisions.append(track["title"])
                # A new level of division replaces the innermost current one,
                # if any (expected at the beginning of the tracklist and
                # possibly elsewhere).
                try:
                    state.divisions.pop()
                except IndexError:
                    pass
                state.index_tracks[state.index + 1] = track["title"]
        return state

View file

@ -0,0 +1,67 @@
# This file is part of beets.
# Copyright 2025, Sarunas Nejus, Henry Oberholtzer.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import annotations
from typing import TYPE_CHECKING
from typing_extensions import NotRequired, TypedDict
if TYPE_CHECKING:
from beets.autotag.hooks import TrackInfo
class ReleaseFormat(TypedDict):
    """A release-format entry from a Discogs release payload."""

    name: str
    qty: int
    descriptions: list[str] | None
class Artist(TypedDict):
    """An artist credit entry from a Discogs payload."""

    name: str
    anv: str  # artist name variation (ANV); may be empty
    join: str  # join phrase following this artist in the credit
    role: str  # empty for main artists; e.g. featuring roles otherwise
    tracks: str
    # declared str, but consumers coerce with str() — the payload may
    # carry ints; see ArtistState.valid_artists
    id: str
    resource_url: str
class Track(TypedDict):
    """A tracklist entry from a Discogs payload.

    Index tracks (section headers) have an empty `position` and may nest
    their children under `sub_tracks`.
    """

    position: str
    type_: str
    title: str
    duration: str
    artists: list[Artist]
    extraartists: NotRequired[list[Artist]]
    sub_tracks: NotRequired[list[Track]]
class ArtistInfo(TypedDict):
    """Artist-related keyword arguments for AlbumInfo/TrackInfo.

    Mirrors the properties exposed by ArtistState.info.
    """

    artist: str
    artists: list[str]
    artist_credit: str
    artists_credit: list[str]
    artist_id: str
    artists_ids: list[str]
class TracklistInfo(TypedDict):
    """Dict form of TracklistState (see TracklistState.info)."""

    index: int
    index_tracks: dict[int, str]
    tracks: list[TrackInfo]
    divisions: list[str]
    next_divisions: list[str]
    mediums: list[str | None]
    medium_indices: list[str | None]

View file

@ -148,7 +148,7 @@ class ExportPlugin(BeetsPlugin):
album=opts.album,
):
try:
data, item = data_emitter(included_keys or "*")
data, _ = data_emitter(included_keys or "*")
except (mediafile.UnreadableFileError, OSError) as ex:
self._log.error("cannot read file: {}", ex)
continue

View file

@ -355,7 +355,7 @@ class ArtSource(RequestMixin, ABC):
# Specify whether this source fetches local or remote images
LOC: ClassVar[SourceLocation]
# A list of methods to match metadata, sorted by descending accuracy
VALID_MATCHING_CRITERIA: list[str] = ["default"]
VALID_MATCHING_CRITERIA: ClassVar[list[str]] = ["default"]
# A human-readable name for the art source
NAME: ClassVar[str]
# The key to select the art source in the config. This value will also be
@ -518,8 +518,8 @@ class RemoteArtSource(ArtSource):
class CoverArtArchive(RemoteArtSource):
NAME = "Cover Art Archive"
ID = "coverart"
VALID_MATCHING_CRITERIA = ["release", "releasegroup"]
VALID_THUMBNAIL_SIZES = [250, 500, 1200]
VALID_MATCHING_CRITERIA: ClassVar[list[str]] = ["release", "releasegroup"]
VALID_THUMBNAIL_SIZES: ClassVar[list[int]] = [250, 500, 1200]
URL = "https://coverartarchive.org/release/{mbid}"
GROUP_URL = "https://coverartarchive.org/release-group/{mbid}"
@ -867,7 +867,7 @@ class ITunesStore(RemoteArtSource):
)
except KeyError as e:
self._log.debug(
"Malformed itunes candidate: {} not found in {}", # NOQA E501
"Malformed itunes candidate: {} not found in {}",
e,
list(c.keys()),
)
@ -1128,7 +1128,7 @@ class LastFM(RemoteArtSource):
ID = "lastfm"
# Sizes in priority order.
SIZES = OrderedDict(
SIZES: ClassVar[dict[str, tuple[int, int]]] = OrderedDict(
[
("mega", (300, 300)),
("extralarge", (300, 300)),
@ -1588,7 +1588,7 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
message = ui.colorize(
"text_highlight_minor", "has album art"
)
self._log.info("{}: {}", album, message)
ui.print_(f"{album}: {message}")
else:
# In ordinary invocations, look for images on the
# filesystem. When forcing, however, always go to the Web
@ -1601,4 +1601,4 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
message = ui.colorize("text_success", "found album art")
else:
message = ui.colorize("text_error", "no art found")
self._log.info("{}: {}", album, message)
ui.print_(f"{album}: {message}")

View file

@ -88,7 +88,7 @@ def apply_matches(d, log):
"""Given a mapping from items to field dicts, apply the fields to
the objects.
"""
some_map = list(d.values())[0]
some_map = next(iter(d.values()))
keys = some_map.keys()
# Only proceed if the "tag" field is equal across all filenames.

View file

@ -61,11 +61,23 @@ def split_on_feat(
artist, which is always a string, and the featuring artist, which
may be a string or None if none is present.
"""
# split on the first "feat".
regex = re.compile(
plugins.feat_tokens(for_artist, custom_words), re.IGNORECASE
# Try explicit featuring tokens first (ft, feat, featuring, etc.)
# to avoid splitting on generic separators like "&" when both are present
regex_explicit = re.compile(
plugins.feat_tokens(for_artist=False, custom_words=custom_words),
re.IGNORECASE,
)
parts = tuple(s.strip() for s in regex.split(artist, 1))
parts = tuple(s.strip() for s in regex_explicit.split(artist, 1))
if len(parts) == 2:
return parts
# Fall back to all tokens including generic separators if no explicit match
if for_artist:
regex = re.compile(
plugins.feat_tokens(for_artist, custom_words), re.IGNORECASE
)
parts = tuple(s.strip() for s in regex.split(artist, 1))
if len(parts) == 1:
return parts[0], None
else:

View file

@ -62,7 +62,7 @@ class KeyFinderPlugin(BeetsPlugin):
try:
output = util.command_output(
command + [util.syspath(item.path)]
[*command, util.syspath(item.path)]
).stdout
except (subprocess.CalledProcessError, OSError) as exc:
self._log.error("execution failed: {}", exc)

View file

@ -28,7 +28,7 @@ import os
import traceback
from functools import singledispatchmethod
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable
from typing import TYPE_CHECKING, Any
import pylast
import yaml
@ -39,6 +39,7 @@ from beets.util import plurality, unique_list
if TYPE_CHECKING:
import optparse
from collections.abc import Callable
from beets.library import LibModel
@ -67,12 +68,12 @@ def flatten_tree(
if isinstance(elem, dict):
for k, v in elem.items():
flatten_tree(v, path + [k], branches)
flatten_tree(v, [*path, k], branches)
elif isinstance(elem, list):
for sub in elem:
flatten_tree(sub, path, branches)
else:
branches.append(path + [str(elem)])
branches.append([*path, str(elem)])
def find_parents(candidate: str, branches: list[list[str]]) -> list[str]:

View file

@ -2,15 +2,16 @@
import datetime
import musicbrainzngs
import requests
from beets import config, ui
from beets.plugins import BeetsPlugin
from beetsplug.lastimport import process_tracks
from ._utils.musicbrainz import MusicBrainzAPIMixin
class ListenBrainzPlugin(BeetsPlugin):
class ListenBrainzPlugin(MusicBrainzAPIMixin, BeetsPlugin):
"""A Beets plugin for interacting with ListenBrainz."""
ROOT = "http://api.listenbrainz.org/1/"
@ -129,17 +130,16 @@ class ListenBrainzPlugin(BeetsPlugin):
)
return tracks
def get_mb_recording_id(self, track):
def get_mb_recording_id(self, track) -> str | None:
"""Returns the MusicBrainz recording ID for a track."""
resp = musicbrainzngs.search_recordings(
query=track["track_metadata"].get("track_name"),
release=track["track_metadata"].get("release_name"),
strict=True,
results = self.mb_api.search(
"recording",
{
"": track["track_metadata"].get("track_name"),
"release": track["track_metadata"].get("release_name"),
},
)
if resp.get("recording-count") == "1":
return resp.get("recording-list")[0].get("id")
else:
return None
return next((r["id"] for r in results), None)
def get_playlists_createdfor(self, username):
"""Returns a list of playlists created by a user."""
@ -207,17 +207,16 @@ class ListenBrainzPlugin(BeetsPlugin):
track_info = []
for track in tracks:
identifier = track.get("identifier")
resp = musicbrainzngs.get_recording_by_id(
recording = self.mb_api.get_recording(
identifier, includes=["releases", "artist-credits"]
)
recording = resp.get("recording")
title = recording.get("title")
artist_credit = recording.get("artist-credit", [])
if artist_credit:
artist = artist_credit[0].get("artist", {}).get("name")
else:
artist = None
releases = recording.get("release-list", [])
releases = recording.get("releases", [])
if releases:
album = releases[0].get("title")
date = releases[0].get("date")

View file

@ -26,7 +26,7 @@ from functools import cached_property, partial, total_ordering
from html import unescape
from itertools import groupby
from pathlib import Path
from typing import TYPE_CHECKING, NamedTuple
from typing import TYPE_CHECKING, ClassVar, NamedTuple
from urllib.parse import quote, quote_plus, urlencode, urlparse
import langdetect
@ -367,7 +367,7 @@ class LRCLib(Backend):
class MusiXmatch(Backend):
URL_TEMPLATE = "https://www.musixmatch.com/lyrics/{}/{}"
REPLACEMENTS = {
REPLACEMENTS: ClassVar[dict[str, str]] = {
r"\s+": "-",
"<": "Less_Than",
">": "Greater_Than",
@ -600,7 +600,7 @@ class Google(SearchBackend):
SEARCH_URL = "https://www.googleapis.com/customsearch/v1"
#: Exclude some letras.mus.br pages which do not contain lyrics.
EXCLUDE_PAGES = [
EXCLUDE_PAGES: ClassVar[list[str]] = [
"significado.html",
"traduccion.html",
"traducao.html",
@ -630,9 +630,12 @@ class Google(SearchBackend):
#: Split cleaned up URL title into artist and title parts.
URL_TITLE_PARTS_RE = re.compile(r" +(?:[ :|-]+|par|by) +|, ")
SOURCE_DIST_FACTOR = {"www.azlyrics.com": 0.5, "www.songlyrics.com": 0.6}
SOURCE_DIST_FACTOR: ClassVar[dict[str, float]] = {
"www.azlyrics.com": 0.5,
"www.songlyrics.com": 0.6,
}
ignored_domains: set[str] = set()
ignored_domains: ClassVar[set[str]] = set()
@classmethod
def pre_process_html(cls, html: str) -> str:
@ -937,7 +940,7 @@ class RestFiles:
class LyricsPlugin(LyricsRequestHandler, plugins.BeetsPlugin):
BACKEND_BY_NAME = {
BACKEND_BY_NAME: ClassVar[dict[str, type[Backend]]] = {
b.name: b for b in [LRCLib, Google, Genius, Tekstowo, MusiXmatch]
}

View file

@ -13,48 +13,151 @@
# included in all copies or substantial portions of the Software.
from __future__ import annotations
import re
from dataclasses import dataclass, field
from functools import cached_property
from typing import TYPE_CHECKING, ClassVar
import musicbrainzngs
from requests.auth import HTTPDigestAuth
from beets import config, ui
from beets import __version__, config, ui
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand
SUBMISSION_CHUNK_SIZE = 200
FETCH_CHUNK_SIZE = 100
UUID_REGEX = r"^[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}$"
from ._utils.musicbrainz import MusicBrainzAPI
if TYPE_CHECKING:
from collections.abc import Iterable, Iterator
from requests import Response
from beets.importer import ImportSession, ImportTask
from beets.library import Album, Library
from ._typing import JSONDict
UUID_PAT = re.compile(r"^[a-f0-9]{8}(-[a-f0-9]{4}){3}-[a-f0-9]{12}$")
def mb_call(func, *args, **kwargs):
    """Call a MusicBrainz API function and catch exceptions.

    Forwards all arguments to ``func`` and returns its result. Library
    errors raised by musicbrainzngs are translated into ``ui.UserError``
    with a readable message; the original exception is chained (PEP 3134)
    so tracebacks still show the root cause.

    :param func: the musicbrainzngs API callable to invoke.
    :raises ui.UserError: on authentication, network/response, or usage
        errors from the MusicBrainz client library.
    """
    try:
        return func(*args, **kwargs)
    except musicbrainzngs.AuthenticationError as exc:
        raise ui.UserError("authentication with MusicBrainz failed") from exc
    except (musicbrainzngs.ResponseError, musicbrainzngs.NetworkError) as exc:
        raise ui.UserError(f"MusicBrainz API error: {exc}") from exc
    except musicbrainzngs.UsageError as exc:
        raise ui.UserError("MusicBrainz credentials missing") from exc
@dataclass
class MusicBrainzUserAPI(MusicBrainzAPI):
"""MusicBrainz API client with user authentication.
In order to retrieve private user collections and modify them, we need to
authenticate the requests with the user's MusicBrainz credentials.
def submit_albums(collection_id, release_ids):
"""Add all of the release IDs to the indicated collection. Multiple
requests are made if there are many release IDs to submit.
See documentation for authentication details:
https://musicbrainz.org/doc/MusicBrainz_API#Authentication
Note that the documentation misleadingly states HTTP 'basic' authentication,
and I had to reverse-engineer musicbrainzngs to discover that it actually
uses HTTP 'digest' authentication.
"""
for i in range(0, len(release_ids), SUBMISSION_CHUNK_SIZE):
chunk = release_ids[i : i + SUBMISSION_CHUNK_SIZE]
mb_call(musicbrainzngs.add_releases_to_collection, collection_id, chunk)
auth: HTTPDigestAuth = field(init=False)
class MusicBrainzCollectionPlugin(BeetsPlugin):
def __init__(self):
super().__init__()
def __post_init__(self) -> None:
super().__post_init__()
config["musicbrainz"]["pass"].redact = True
musicbrainzngs.auth(
self.auth = HTTPDigestAuth(
config["musicbrainz"]["user"].as_str(),
config["musicbrainz"]["pass"].as_str(),
)
def request(self, *args, **kwargs) -> Response:
"""Authenticate and include required client param in all requests."""
kwargs.setdefault("params", {})
kwargs["params"]["client"] = f"beets-{__version__}"
kwargs["auth"] = self.auth
return super().request(*args, **kwargs)
def browse_collections(self) -> list[JSONDict]:
"""Get all collections for the authenticated user."""
return self._browse("collection")
@dataclass
class MBCollection:
    """A user's release collection on MusicBrainz.

    Wraps the raw collection payload together with an authenticated API
    client and exposes chunked fetch/add/remove helpers, keeping every
    request within the service's size limits.
    """

    # Upper bounds on IDs per submission and releases per fetch page.
    SUBMISSION_CHUNK_SIZE: ClassVar[int] = 200
    FETCH_CHUNK_SIZE: ClassVar[int] = 100

    data: JSONDict
    mb_api: MusicBrainzUserAPI

    @property
    def id(self) -> str:
        """MusicBrainz identifier of this collection."""
        return self.data["id"]

    @property
    def release_count(self) -> int:
        """Number of releases the collection reports in its metadata."""
        return self.data["release-count"]

    @property
    def releases_url(self) -> str:
        """API endpoint URL listing this collection's releases."""
        return f"{self.mb_api.api_root}/collection/{self.id}/releases"

    @property
    def releases(self) -> list[JSONDict]:
        """All releases in the collection, gathered page by page."""
        collected: list[JSONDict] = []
        for offset in range(0, self.release_count, self.FETCH_CHUNK_SIZE):
            collected.extend(self.get_releases(offset))
        return collected

    def get_releases(self, offset: int) -> list[JSONDict]:
        """Return one page of releases starting at *offset*."""
        page_params = {"limit": self.FETCH_CHUNK_SIZE, "offset": offset}
        response = self.mb_api.get_json(self.releases_url, params=page_params)
        return response["releases"]

    @classmethod
    def get_id_chunks(cls, id_list: list[str]) -> Iterator[list[str]]:
        """Yield *id_list* split into submission-sized batches."""
        step = cls.SUBMISSION_CHUNK_SIZE
        for start in range(0, len(id_list), step):
            yield id_list[start : start + step]

    def add_releases(self, releases: list[str]) -> None:
        """Add the given release IDs to the collection, batch by batch."""
        for batch in self.get_id_chunks(releases):
            # Need to escape semicolons: https://github.com/psf/requests/issues/6990
            self.mb_api.put(f"{self.releases_url}/{'%3B'.join(batch)}")

    def remove_releases(self, releases: list[str]) -> None:
        """Remove the given release IDs from the collection, batch by batch."""
        for batch in self.get_id_chunks(releases):
            # Need to escape semicolons: https://github.com/psf/requests/issues/6990
            self.mb_api.delete(f"{self.releases_url}/{'%3B'.join(batch)}")
def submit_albums(collection: MBCollection, release_ids):
    """Add every ID in *release_ids* to *collection*.

    Splitting the submission into multiple requests is delegated to the
    collection object, so this is a thin compatibility wrapper.
    """
    collection.add_releases(release_ids)
class MusicBrainzCollectionPlugin(BeetsPlugin):
def __init__(self) -> None:
super().__init__()
self.config.add(
{
"auto": False,
@ -65,47 +168,32 @@ class MusicBrainzCollectionPlugin(BeetsPlugin):
if self.config["auto"]:
self.import_stages = [self.imported]
def _get_collection(self):
collections = mb_call(musicbrainzngs.get_collections)
if not collections["collection-list"]:
@cached_property
def mb_api(self) -> MusicBrainzUserAPI:
return MusicBrainzUserAPI()
@cached_property
def collection(self) -> MBCollection:
if not (collections := self.mb_api.browse_collections()):
raise ui.UserError("no collections exist for user")
# Get all release collection IDs, avoiding event collections
collection_ids = [
x["id"]
for x in collections["collection-list"]
if x["entity-type"] == "release"
]
if not collection_ids:
if not (
collection_by_id := {
c["id"]: c for c in collections if c["entity-type"] == "release"
}
):
raise ui.UserError("No release collection found.")
# Check that the collection exists so we can present a nice error
collection = self.config["collection"].as_str()
if collection:
if collection not in collection_ids:
raise ui.UserError(f"invalid collection ID: {collection}")
return collection
if collection_id := self.config["collection"].as_str():
if not (collection := collection_by_id.get(collection_id)):
raise ui.UserError(f"invalid collection ID: {collection_id}")
else:
# No specified collection. Just return the first collection ID
collection = next(iter(collection_by_id.values()))
# No specified collection. Just return the first collection ID
return collection_ids[0]
def _get_albums_in_collection(self, id):
def _fetch(offset):
res = mb_call(
musicbrainzngs.get_releases_in_collection,
id,
limit=FETCH_CHUNK_SIZE,
offset=offset,
)["collection"]
return [x["id"] for x in res["release-list"]], res["release-count"]
offset = 0
albums_in_collection, release_count = _fetch(offset)
for i in range(0, release_count, FETCH_CHUNK_SIZE):
albums_in_collection += _fetch(offset)[0]
offset += FETCH_CHUNK_SIZE
return albums_in_collection
return MBCollection(collection, self.mb_api)
def commands(self):
mbupdate = Subcommand("mbupdate", help="Update MusicBrainz collection")
@ -120,45 +208,33 @@ class MusicBrainzCollectionPlugin(BeetsPlugin):
mbupdate.func = self.update_collection
return [mbupdate]
def remove_missing(self, collection_id, lib_albums):
lib_ids = {x.mb_albumid for x in lib_albums}
albums_in_collection = self._get_albums_in_collection(collection_id)
remove_me = list(set(albums_in_collection) - lib_ids)
for i in range(0, len(remove_me), FETCH_CHUNK_SIZE):
chunk = remove_me[i : i + FETCH_CHUNK_SIZE]
mb_call(
musicbrainzngs.remove_releases_from_collection,
collection_id,
chunk,
)
def update_collection(self, lib, opts, args):
def update_collection(self, lib: Library, opts, args) -> None:
self.config.set_args(opts)
remove_missing = self.config["remove"].get(bool)
self.update_album_list(lib, lib.albums(), remove_missing)
def imported(self, session, task):
def imported(self, session: ImportSession, task: ImportTask) -> None:
"""Add each imported album to the collection."""
if task.is_album:
self.update_album_list(session.lib, [task.album])
self.update_album_list(
session.lib, [task.album], remove_missing=False
)
def update_album_list(self, lib, album_list, remove_missing=False):
def update_album_list(
self, lib: Library, albums: Iterable[Album], remove_missing: bool
) -> None:
"""Update the MusicBrainz collection from a list of Beets albums"""
collection_id = self._get_collection()
collection = self.collection
# Get a list of all the album IDs.
album_ids = []
for album in album_list:
aid = album.mb_albumid
if aid:
if re.match(UUID_REGEX, aid):
album_ids.append(aid)
else:
self._log.info("skipping invalid MBID: {}", aid)
album_ids = [id_ for a in albums if UUID_PAT.match(id_ := a.mb_albumid)]
# Submit to MusicBrainz.
self._log.info("Updating MusicBrainz collection {}...", collection_id)
submit_albums(collection_id, album_ids)
self._log.info("Updating MusicBrainz collection {}...", collection.id)
collection.add_releases(album_ids)
if remove_missing:
self.remove_missing(collection_id, lib.albums())
lib_ids = {x.mb_albumid for x in lib.albums()}
albums_in_collection = {r["id"] for r in collection.releases}
collection.remove_releases(list(albums_in_collection - lib_ids))
self._log.info("...MusicBrainz collection updated.")

View file

@ -24,7 +24,7 @@ import mediafile
from typing_extensions import override
from beets import config
from beets.autotag.distance import Distance, distance
from beets.autotag.distance import distance
from beets.autotag.hooks import AlbumInfo
from beets.autotag.match import assign_items
from beets.plugins import find_plugins
@ -39,6 +39,7 @@ if TYPE_CHECKING:
from collections.abc import Iterable, Sequence
from beets.autotag import AlbumMatch
from beets.autotag.distance import Distance
from beets.library import Item
from beetsplug._typing import JSONDict
@ -141,7 +142,7 @@ class MusicBrainzPseudoReleasePlugin(MusicBrainzPlugin):
if (ids := self._intercept_mb_release(release)) and (
album_id := self._extract_id(ids[0])
):
raw_pseudo_release = self.api.get_release(album_id)
raw_pseudo_release = self.mb_api.get_release(album_id)
pseudo_release = super().album_info(raw_pseudo_release)
if self.config["custom_tags_only"].get(bool):

View file

@ -69,7 +69,7 @@ class MBSubmitPlugin(BeetsPlugin):
paths.append(displayable_path(p))
try:
picard_path = self.config["picard_path"].as_str()
subprocess.Popen([picard_path] + paths)
subprocess.Popen([picard_path, *paths])
self._log.info("launched picard from\n{}", picard_path)
except OSError as exc:
self._log.error("Could not open picard, got error:\n{}", exc)

View file

@ -71,7 +71,7 @@ class MBSyncPlugin(BeetsPlugin):
"""Retrieve and apply info from the autotagger for items matched by
query.
"""
for item in lib.items(query + ["singleton:true"]):
for item in lib.items([*query, "singleton:true"]):
if not item.mb_trackid:
self._log.info(
"Skipping singleton with no mb_trackid: {}", item

View file

@ -14,14 +14,20 @@
"""Synchronize information from music player libraries"""
from __future__ import annotations
from abc import ABCMeta, abstractmethod
from importlib import import_module
from typing import TYPE_CHECKING, ClassVar
from confuse import ConfigValueError
from beets import ui
from beets.plugins import BeetsPlugin
if TYPE_CHECKING:
from beets.dbcore import types
METASYNC_MODULE = "beetsplug.metasync"
# Dictionary to map the MODULE and the CLASS NAME of meta sources
@ -32,8 +38,9 @@ SOURCES = {
class MetaSource(metaclass=ABCMeta):
item_types: ClassVar[dict[str, types.Type]]
def __init__(self, config, log):
self.item_types = {}
self.config = config
self._log = log

View file

@ -17,6 +17,7 @@
from datetime import datetime
from os.path import basename
from time import mktime
from typing import ClassVar
from xml.sax.saxutils import quoteattr
from beets.dbcore import types
@ -35,7 +36,7 @@ dbus = import_dbus()
class Amarok(MetaSource):
item_types = {
item_types: ClassVar[dict[str, types.Type]] = {
"amarok_rating": types.INTEGER,
"amarok_score": types.FLOAT,
"amarok_uid": types.STRING,

View file

@ -20,6 +20,7 @@ import shutil
import tempfile
from contextlib import contextmanager
from time import mktime
from typing import ClassVar
from urllib.parse import unquote, urlparse
from confuse import ConfigValueError
@ -58,7 +59,7 @@ def _norm_itunes_path(path):
class Itunes(MetaSource):
item_types = {
item_types: ClassVar[dict[str, types.Type]] = {
"itunes_rating": types.INTEGER, # 0..100 scale
"itunes_playcount": types.INTEGER,
"itunes_skipcount": types.INTEGER,

View file

@ -15,18 +15,26 @@
"""List missing tracks."""
from collections import defaultdict
from collections.abc import Iterator
from __future__ import annotations
import musicbrainzngs
from musicbrainzngs.musicbrainz import MusicBrainzError
from collections import defaultdict
from typing import TYPE_CHECKING, ClassVar
import requests
from beets import config, metadata_plugins
from beets.dbcore import types
from beets.library import Album, Item, Library
from beets.library import Item
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, print_
from ._utils.musicbrainz import MusicBrainzAPIMixin
if TYPE_CHECKING:
from collections.abc import Iterator
from beets.library import Album, Library
MB_ARTIST_QUERY = r"mb_albumartistid::^\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$"
@ -85,10 +93,10 @@ def _item(track_info, album_info, album_id):
)
class MissingPlugin(BeetsPlugin):
class MissingPlugin(MusicBrainzAPIMixin, BeetsPlugin):
"""List missing tracks"""
album_types = {
album_types: ClassVar[dict[str, types.Type]] = {
"missing": types.INTEGER,
}
@ -189,19 +197,19 @@ class MissingPlugin(BeetsPlugin):
calculating_total = self.config["total"].get()
for (artist, artist_id), album_ids in album_ids_by_artist.items():
try:
resp = musicbrainzngs.browse_release_groups(artist=artist_id)
except MusicBrainzError as err:
resp = self.mb_api.browse_release_groups(artist=artist_id)
except requests.exceptions.RequestException:
self._log.info(
"Couldn't fetch info for artist '{}' ({}) - '{}'",
"Couldn't fetch info for artist '{}' ({})",
artist,
artist_id,
err,
exc_info=True,
)
continue
missing_titles = [
f"{artist} - {rg['title']}"
for rg in resp["release-group-list"]
for rg in resp
if rg["id"] not in album_ids
]

View file

@ -15,6 +15,7 @@
import os
import time
from typing import ClassVar
import mpd
@ -318,7 +319,7 @@ class MPDStats:
class MPDStatsPlugin(plugins.BeetsPlugin):
item_types = {
item_types: ClassVar[dict[str, types.Type]] = {
"play_count": types.INTEGER,
"skip_count": types.INTEGER,
"last_played": types.DATE,

View file

@ -16,17 +16,14 @@
from __future__ import annotations
import operator
from collections import Counter
from contextlib import suppress
from dataclasses import dataclass
from functools import cached_property, singledispatchmethod
from itertools import groupby, product
from functools import cached_property
from itertools import product
from typing import TYPE_CHECKING, Any
from urllib.parse import urljoin
from confuse.exceptions import NotFoundError
from requests_ratelimiter import LimiterMixin
import beets
import beets.autotag.hooks
@ -35,11 +32,8 @@ from beets.metadata_plugins import MetadataSourcePlugin
from beets.util.deprecation import deprecate_for_user
from beets.util.id_extractors import extract_release_id
from ._utils.requests import (
HTTPNotFoundError,
RequestHandler,
TimeoutAndRetrySession,
)
from ._utils.musicbrainz import MusicBrainzAPIMixin
from ._utils.requests import HTTPNotFoundError
if TYPE_CHECKING:
from collections.abc import Iterable, Sequence
@ -103,86 +97,6 @@ BROWSE_CHUNKSIZE = 100
BROWSE_MAXTRACKS = 500
class LimiterTimeoutSession(LimiterMixin, TimeoutAndRetrySession):
    """Request session combining rate limiting with timeout-and-retry
    behaviour via its two base classes; adds no behaviour of its own."""

    pass
@dataclass
class MusicBrainzAPI(RequestHandler):
    """Thin client for the MusicBrainz web service (``/ws/2`` JSON API).

    Requests go through a rate-limited session created by
    :meth:`create_session`; responses are post-processed by
    ``_group_relations`` to regroup generic 'relations' lists.
    """

    # Base URL of the MusicBrainz server, e.g. "https://musicbrainz.org".
    api_host: str
    # Maximum requests per second enforced by the session.
    rate_limit: float

    def create_session(self) -> LimiterTimeoutSession:
        # Session enforcing `rate_limit` requests per second.
        return LimiterTimeoutSession(per_second=self.rate_limit)

    def get_entity(
        self, entity: str, inc_list: list[str] | None = None, **kwargs
    ) -> JSONDict:
        """Fetch ``/ws/2/<entity>`` as JSON, normalizing its relations.

        :param entity: path fragment, e.g. "release/<id>" or "recording".
        :param inc_list: optional 'inc' values, joined with '+'.
        """
        if inc_list:
            kwargs["inc"] = "+".join(inc_list)
        return self._group_relations(
            self.get_json(
                f"{self.api_host}/ws/2/{entity}",
                # 'fmt=json' selects the JSON (rather than XML) response.
                params={**kwargs, "fmt": "json"},
            )
        )

    def get_release(self, id_: str) -> JSONDict:
        """Fetch a single release with the standard release includes."""
        return self.get_entity(f"release/{id_}", inc_list=RELEASE_INCLUDES)

    def get_recording(self, id_: str) -> JSONDict:
        """Fetch a single recording with the standard track includes."""
        return self.get_entity(f"recording/{id_}", inc_list=TRACK_INCLUDES)

    def browse_recordings(self, **kwargs) -> list[JSONDict]:
        """Browse recordings, defaulting to chunked paging and browse includes."""
        kwargs.setdefault("limit", BROWSE_CHUNKSIZE)
        kwargs.setdefault("inc_list", BROWSE_INCLUDES)
        return self.get_entity("recording", **kwargs)["recordings"]

    # Dispatch on the runtime type of `data`; the base case below handles
    # scalars, with list/dict overloads registered underneath. The
    # @singledispatchmethod/@classmethod stacking order is significant.
    @singledispatchmethod
    @classmethod
    def _group_relations(cls, data: Any) -> Any:
        """Normalize MusicBrainz 'relations' into type-keyed fields recursively.

        This helper rewrites payloads that use a generic 'relations' list into
        a structure that is easier to consume downstream. When a mapping
        contains 'relations', those entries are regrouped by their 'target-type'
        and stored under keys like '<target-type>-relations'. The original
        'relations' key is removed to avoid ambiguous access patterns.

        The transformation is applied recursively so that nested objects and
        sequences are normalized consistently, while non-container values are
        left unchanged.
        """
        return data

    @_group_relations.register(list)
    @classmethod
    def _(cls, data: list[Any]) -> list[Any]:
        # Lists: normalize each element independently.
        return [cls._group_relations(i) for i in data]

    @_group_relations.register(dict)
    @classmethod
    def _(cls, data: JSONDict) -> JSONDict:
        # Snapshot items with list() because `data` is mutated while iterating.
        for k, v in list(data.items()):
            if k == "relations":
                get_target_type = operator.methodcaller("get", "target-type")
                # groupby requires pre-sorting by the same key function.
                for target_type, group in groupby(
                    sorted(v, key=get_target_type), get_target_type
                ):
                    # Drop the now-redundant 'target-type' from each entry.
                    relations = [
                        {k: v for k, v in item.items() if k != "target-type"}
                        for item in group
                    ]
                    data[f"{target_type}-relations"] = cls._group_relations(
                        relations
                    )
                data.pop("relations")
            else:
                data[k] = cls._group_relations(v)
        return data
def _preferred_alias(
aliases: list[JSONDict], languages: list[str] | None = None
) -> JSONDict | None:
@ -333,8 +247,9 @@ def _preferred_release_event(
for country in preferred_countries:
for event in release.get("release-events", {}):
try:
if country in event["area"]["iso-3166-1-codes"]:
return country, event["date"]
if area := event.get("area"):
if country in area["iso-3166-1-codes"]:
return country, event["date"]
except KeyError:
pass
@ -405,25 +320,11 @@ def _merge_pseudo_and_actual_album(
return merged
class MusicBrainzPlugin(MetadataSourcePlugin):
class MusicBrainzPlugin(MusicBrainzAPIMixin, MetadataSourcePlugin):
@cached_property
def genres_field(self) -> str:
return f"{self.config['genres_tag'].as_choice(['genre', 'tag'])}s"
@cached_property
def api(self) -> MusicBrainzAPI:
hostname = self.config["host"].as_str()
if hostname == "musicbrainz.org":
hostname, rate_limit = "https://musicbrainz.org", 1.0
else:
https = self.config["https"].get(bool)
hostname = f"http{'s' if https else ''}://{hostname}"
rate_limit = (
self.config["ratelimit"].get(int)
/ self.config["ratelimit_interval"].as_number()
)
return MusicBrainzAPI(hostname, rate_limit)
def __init__(self):
"""Set up the python-musicbrainz-ngs module according to settings
from the beets configuration. This should be called at startup.
@ -431,10 +332,6 @@ class MusicBrainzPlugin(MetadataSourcePlugin):
super().__init__()
self.config.add(
{
"host": "musicbrainz.org",
"https": False,
"ratelimit": 1,
"ratelimit_interval": 1,
"genres": False,
"genres_tag": "genre",
"external_ids": {
@ -589,7 +486,9 @@ class MusicBrainzPlugin(MetadataSourcePlugin):
for i in range(0, ntracks, BROWSE_CHUNKSIZE):
self._log.debug("Retrieving tracks starting at {}", i)
recording_list.extend(
self.api.browse_recordings(release=release["id"], offset=i)
self.mb_api.browse_recordings(
release=release["id"], offset=i
)
)
track_map = {r["id"]: r for r in recording_list}
for medium in release["media"]:
@ -853,17 +752,9 @@ class MusicBrainzPlugin(MetadataSourcePlugin):
using the provided criteria. Handles API errors by converting them into
MusicBrainzAPIError exceptions with contextual information.
"""
query = " AND ".join(
f'{k}:"{_v}"'
for k, v in filters.items()
if (_v := v.lower().strip())
return self.mb_api.search(
query_type, filters, limit=self.config["search_limit"].get()
)
self._log.debug(
"Searching for MusicBrainz {}s with: {!r}", query_type, query
)
return self.api.get_entity(
query_type, query=query, limit=self.config["search_limit"].get()
)[f"{query_type}s"]
def candidates(
self,
@ -901,7 +792,7 @@ class MusicBrainzPlugin(MetadataSourcePlugin):
self._log.debug("Invalid MBID ({}).", album_id)
return None
res = self.api.get_release(albumid)
res = self.mb_api.get_release(albumid, includes=RELEASE_INCLUDES)
# resolve linked release relations
actual_res = None
@ -914,7 +805,9 @@ class MusicBrainzPlugin(MetadataSourcePlugin):
rel["type"] == "transl-tracklisting"
and rel["direction"] == "backward"
):
actual_res = self.api.get_release(rel["release"]["id"])
actual_res = self.mb_api.get_release(
rel["release"]["id"], includes=RELEASE_INCLUDES
)
# release is potentially a pseudo release
release = self.album_info(res)
@ -937,6 +830,8 @@ class MusicBrainzPlugin(MetadataSourcePlugin):
return None
with suppress(HTTPNotFoundError):
return self.track_info(self.api.get_recording(trackid))
return self.track_info(
self.mb_api.get_recording(trackid, includes=TRACK_INCLUDES)
)
return None

View file

@ -16,59 +16,19 @@
and work composition date
"""
import musicbrainzngs
from __future__ import annotations
from typing import Any
import requests
from beets import ui
from beets.plugins import BeetsPlugin
def direct_parent_id(mb_workid, work_date=None):
    """Given a Musicbrainz work id, find the id one of the works the work is
    part of and the first composition date it encounters.
    """
    work_info = musicbrainzngs.get_work_by_id(
        mb_workid, includes=["work-rels", "artist-rels"]
    )
    work = work_info["work"]

    # Only look for a composition date if the caller has not already
    # found one earlier in the parent chain.
    if work_date is None:
        for relation in work.get("artist-relation-list", []):
            if relation["type"] == "composer" and "end" in relation:
                work_date = relation["end"]

    # A "parts" relation in the backward direction points at the
    # containing (parent) work; return the first one found.
    for relation in work.get("work-relation-list", []):
        is_parent = (
            relation["type"] == "parts"
            and relation.get("direction") == "backward"
        )
        if is_parent:
            return relation["work"]["id"], work_date

    return None, work_date
from ._utils.musicbrainz import MusicBrainzAPIMixin
def work_parent_id(mb_workid):
    """Find the parent work id and composition date of a work given its id.

    Walks up the chain of parent works until a work with no parent is
    reached, returning that topmost work's id together with the first
    composition date encountered along the way (or None if none found).
    """
    work_date = None
    while True:
        new_mb_workid, work_date = direct_parent_id(mb_workid, work_date)
        if not new_mb_workid:
            # Top of the chain: the current work has no parent.
            return mb_workid, work_date
        mb_workid = new_mb_workid
    # NOTE: the original had an unreachable `return` after this loop
    # (every iteration either returns or continues); it has been removed.
def find_parentwork_info(mb_workid):
    """Get the MusicBrainz information dict about a parent work, including
    the artist relations, and the composition date for a work's parent work.
    """
    # Resolve the topmost parent work first, then fetch its full record
    # (with artist relations) in one follow-up API call.
    top_work_id, composition_date = work_parent_id(mb_workid)
    parent_info = musicbrainzngs.get_work_by_id(
        top_work_id, includes=["artist-rels"]
    )
    return parent_info, composition_date
class ParentWorkPlugin(BeetsPlugin):
class ParentWorkPlugin(MusicBrainzAPIMixin, BeetsPlugin):
def __init__(self):
super().__init__()
@ -130,14 +90,13 @@ class ParentWorkPlugin(BeetsPlugin):
parentwork_info = {}
composer_exists = False
if "artist-relation-list" in work_info["work"]:
for artist in work_info["work"]["artist-relation-list"]:
if artist["type"] == "composer":
composer_exists = True
parent_composer.append(artist["artist"]["name"])
parent_composer_sort.append(artist["artist"]["sort-name"])
if "end" in artist.keys():
parentwork_info["parentwork_date"] = artist["end"]
for artist in work_info.get("artist-relations", []):
if artist["type"] == "composer":
composer_exists = True
parent_composer.append(artist["artist"]["name"])
parent_composer_sort.append(artist["artist"]["sort-name"])
if "end" in artist.keys():
parentwork_info["parentwork_date"] = artist["end"]
parentwork_info["parent_composer"] = ", ".join(parent_composer)
parentwork_info["parent_composer_sort"] = ", ".join(
@ -149,16 +108,14 @@ class ParentWorkPlugin(BeetsPlugin):
"no composer for {}; add one at "
"https://musicbrainz.org/work/{}",
item,
work_info["work"]["id"],
work_info["id"],
)
parentwork_info["parentwork"] = work_info["work"]["title"]
parentwork_info["mb_parentworkid"] = work_info["work"]["id"]
parentwork_info["parentwork"] = work_info["title"]
parentwork_info["mb_parentworkid"] = work_info["id"]
if "disambiguation" in work_info["work"]:
parentwork_info["parentwork_disambig"] = work_info["work"][
"disambiguation"
]
if "disambiguation" in work_info:
parentwork_info["parentwork_disambig"] = work_info["disambiguation"]
else:
parentwork_info["parentwork_disambig"] = None
@ -190,9 +147,9 @@ class ParentWorkPlugin(BeetsPlugin):
work_changed = item.parentwork_workid_current != item.mb_workid
if force or not hasparent or work_changed:
try:
work_info, work_date = find_parentwork_info(item.mb_workid)
except musicbrainzngs.musicbrainz.WebServiceError as e:
self._log.debug("error fetching work: {}", e)
work_info, work_date = self.find_parentwork_info(item.mb_workid)
except requests.exceptions.RequestException:
self._log.debug("error fetching work", item, exc_info=True)
return
parent_info = self.get_info(item, work_info)
parent_info["parentwork_workid_current"] = item.mb_workid
@ -233,3 +190,37 @@ class ParentWorkPlugin(BeetsPlugin):
"parentwork_date",
],
)
def find_parentwork_info(
    self, mb_workid: str
) -> tuple[dict[str, Any], str | None]:
    """Get the MusicBrainz information dict about a parent work, including
    the artist relations, and the composition date for a work's parent work.

    Starting from ``mb_workid``, repeatedly fetches each work and follows
    its backward "parts" work relation to the containing (parent) work
    until no parent remains.  Returns the last work fetched together with
    the first composer "end" date encountered along the chain (or ``None``
    if no composer relation carries one).
    """
    work_date = None
    parent_id: str | None = mb_workid
    while parent_id:
        current_id = parent_id
        # Fetch the current work with both relation kinds in one call.
        work_info = self.mb_api.get_work(
            current_id, includes=["work-rels", "artist-rels"]
        )
        # Keep the first composer "end" date found while walking up; later
        # iterations never overwrite it (``work_date or ...``).
        work_date = work_date or next(
            (
                end
                for a in work_info.get("artist-relations", [])
                if a["type"] == "composer" and (end := a.get("end"))
            ),
            None,
        )
        # A backward "parts" relation points at the work this one is part
        # of; absence of one terminates the loop at the topmost parent.
        parent_id = next(
            (
                w["work"]["id"]
                for w in work_info.get("work-relations", [])
                if w["type"] == "parts" and w["direction"] == "backward"
            ),
            None,
        )
    return work_info, work_date

View file

@ -10,17 +10,22 @@
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import annotations
import os
import tempfile
from collections.abc import Sequence
from pathlib import Path
from typing import TYPE_CHECKING, ClassVar
import beets
from beets.dbcore.query import BLOB_TYPE, InQuery
from beets.util import path_as_posix
if TYPE_CHECKING:
from collections.abc import Sequence
from beets.dbcore.query import FieldQueryType
def is_m3u_file(path: str) -> bool:
    """Return True if *path* has an M3U playlist extension (.m3u/.m3u8)."""
    suffix = Path(path).suffix.lower()
    return suffix == ".m3u" or suffix == ".m3u8"
@ -82,7 +87,9 @@ class PlaylistQuery(InQuery[bytes]):
class PlaylistPlugin(beets.plugins.BeetsPlugin):
item_queries = {"playlist": PlaylistQuery}
item_queries: ClassVar[dict[str, FieldQueryType]] = {
"playlist": PlaylistQuery
}
def __init__(self):
super().__init__()

View file

@ -1,12 +1,17 @@
from __future__ import annotations
import shutil
from pathlib import Path
from typing import TYPE_CHECKING
import mediafile
from beets import ui, util
from beets.library import Item, Library
from beets.plugins import BeetsPlugin
if TYPE_CHECKING:
from beets.library import Item, Library
class ReplacePlugin(BeetsPlugin):
def commands(self):

View file

@ -20,6 +20,7 @@ import enum
import math
import os
import queue
import shutil
import signal
import subprocess
import sys
@ -27,8 +28,9 @@ import warnings
from abc import ABC, abstractmethod
from dataclasses import dataclass
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Event, Thread
from typing import TYPE_CHECKING, Any, TypeVar
from typing import TYPE_CHECKING, Any, ClassVar, Literal, TypeVar
from beets import ui
from beets.plugins import BeetsPlugin
@ -542,10 +544,20 @@ class FfmpegBackend(Backend):
# mpgain/aacgain CLI tool backend.
Tool = Literal["mp3rgain", "aacgain", "mp3gain"]
class CommandBackend(Backend):
NAME = "command"
SUPPORTED_FORMATS_BY_TOOL: ClassVar[dict[Tool, set[str]]] = {
"mp3rgain": {"AAC", "MP3"},
"aacgain": {"AAC", "MP3"},
"mp3gain": {"MP3"},
}
do_parallel = True
cmd_name: Tool
def __init__(self, config: ConfigView, log: Logger):
super().__init__(config, log)
config.add(
@ -555,25 +567,21 @@ class CommandBackend(Backend):
}
)
self.command: str = config["command"].as_str()
cmd_path: Path = Path(config["command"].as_str())
supported_tools = set(self.SUPPORTED_FORMATS_BY_TOOL)
if self.command:
# Explicit executable path.
if not os.path.isfile(self.command):
raise FatalReplayGainError(
f"replaygain command does not exist: {self.command}"
)
else:
# Check whether the program is in $PATH.
for cmd in ("mp3gain", "aacgain"):
try:
call([cmd, "-v"], self._log)
self.command = cmd
except OSError:
pass
if not self.command:
if (cmd_name := cmd_path.name) not in supported_tools:
raise FatalReplayGainError(
"no replaygain command found: install mp3gain or aacgain"
f"replaygain.command must be one of {supported_tools!r},"
f" not {cmd_name!r}"
)
if command_exec := shutil.which(str(cmd_path)):
self.command = command_exec
self.cmd_name = cmd_name # type: ignore[assignment]
else:
raise FatalReplayGainError(
f"replaygain command not found: {cmd_path}"
)
self.noclip = config["noclip"].get(bool)
@ -608,11 +616,7 @@ class CommandBackend(Backend):
def format_supported(self, item: Item) -> bool:
"""Checks whether the given item is supported by the selected tool."""
if "mp3gain" in self.command and item.format != "MP3":
return False
elif "aacgain" in self.command and item.format not in ("MP3", "AAC"):
return False
return True
return item.format in self.SUPPORTED_FORMATS_BY_TOOL[self.cmd_name]
def compute_gain(
self,
@ -642,11 +646,11 @@ class CommandBackend(Backend):
cmd: list[str] = [self.command, "-o", "-s", "s"]
if self.noclip:
# Adjust to avoid clipping.
cmd = cmd + ["-k"]
cmd = [*cmd, "-k"]
else:
# Disable clipping warning.
cmd = cmd + ["-c"]
cmd = cmd + ["-d", str(int(target_level - 89))]
cmd = [*cmd, "-c"]
cmd = [*cmd, "-d", str(int(target_level - 89))]
cmd = cmd + [syspath(i.path) for i in items]
self._log.debug("analyzing {} files", len(items))
@ -1105,7 +1109,7 @@ class AudioToolsBackend(Backend):
# The first item is taken and opened to get the sample rate to
# initialize the replaygain object. The object is used for all the
# tracks in the album to get the album values.
item = list(task.items)[0]
item = next(iter(task.items))
audiofile = self.open_audio_file(item)
rg = self.init_replaygain(audiofile, item)

View file

@ -17,13 +17,13 @@
from __future__ import annotations
import os
from typing import Any, TypeAlias
from typing import TYPE_CHECKING, Any, TypeAlias
from urllib.parse import quote
from urllib.request import pathname2url
from beets import ui
from beets.dbcore.query import ParsingError, Query, Sort
from beets.library import Album, Item, Library, parse_query_string
from beets.library import Album, Item, parse_query_string
from beets.plugins import BeetsPlugin
from beets.plugins import send as send_event
from beets.util import (
@ -36,6 +36,9 @@ from beets.util import (
syspath,
)
if TYPE_CHECKING:
from beets.library import Library
QueryAndSort = tuple[Query, Sort]
PlaylistQuery = Query | tuple[QueryAndSort, ...] | None
PlaylistMatch: TypeAlias = tuple[

View file

@ -27,7 +27,7 @@ import re
import threading
import time
import webbrowser
from typing import TYPE_CHECKING, Any, Literal, Union
from typing import TYPE_CHECKING, Any, ClassVar, Literal
import confuse
import requests
@ -36,16 +36,13 @@ from beets import ui
from beets.autotag.hooks import AlbumInfo, TrackInfo
from beets.dbcore import types
from beets.library import Library
from beets.metadata_plugins import (
IDResponse,
SearchApiMetadataSourcePlugin,
SearchFilter,
)
from beets.metadata_plugins import IDResponse, SearchApiMetadataSourcePlugin
if TYPE_CHECKING:
from collections.abc import Sequence
from beets.library import Library
from beets.metadata_plugins import SearchFilter
from beetsplug._typing import JSONDict
DEFAULT_WAITING_TIME = 5
@ -89,11 +86,9 @@ class AudioFeaturesUnavailableError(Exception):
class SpotifyPlugin(
SearchApiMetadataSourcePlugin[
Union[SearchResponseAlbums, SearchResponseTracks]
]
SearchApiMetadataSourcePlugin[SearchResponseAlbums | SearchResponseTracks]
):
item_types = {
item_types: ClassVar[dict[str, types.Type]] = {
"spotify_track_popularity": types.INTEGER,
"spotify_acousticness": types.FLOAT,
"spotify_danceability": types.FLOAT,
@ -119,7 +114,7 @@ class SpotifyPlugin(
track_url = "https://api.spotify.com/v1/tracks/"
audio_features_url = "https://api.spotify.com/v1/audio-features/"
spotify_audio_features = {
spotify_audio_features: ClassVar[dict[str, str]] = {
"acousticness": "spotify_acousticness",
"danceability": "spotify_danceability",
"energy": "spotify_energy",
@ -144,7 +139,7 @@ class SpotifyPlugin(
"region_filter": None,
"regex": [],
"client_id": "4e414367a1d14c75a5c5129a627fcab8",
"client_secret": "f82bdc09b2254f1a8286815d02fd46dc",
"client_secret": "4a9b5b7848e54e118a7523b1c7c3e1e5",
"tokenfile": "spotify_token.json",
}
)

View file

@ -15,6 +15,7 @@
"""Moves patterns in path formats (suitable for moving articles)."""
import re
from typing import ClassVar
from beets.plugins import BeetsPlugin
@ -27,7 +28,7 @@ FORMAT = "{}, {}"
class ThePlugin(BeetsPlugin):
patterns: list[str] = []
patterns: ClassVar[list[str]] = []
def __init__(self):
super().__init__()
@ -58,9 +59,9 @@ class ThePlugin(BeetsPlugin):
p,
)
if self.config["a"]:
self.patterns = [PATTERN_A] + self.patterns
self.patterns = [PATTERN_A, *self.patterns]
if self.config["the"]:
self.patterns = [PATTERN_THE] + self.patterns
self.patterns = [PATTERN_THE, *self.patterns]
if not self.patterns:
self._log.warning("no patterns defined!")

View file

@ -16,18 +16,23 @@
Title case logic is derived from the python-titlecase library.
Provides a template function and a tag modification function."""
from __future__ import annotations
import re
from functools import cached_property
from typing import TypedDict
from typing import TYPE_CHECKING, TypedDict
from titlecase import titlecase
from beets import ui
from beets.autotag.hooks import AlbumInfo, Info
from beets.importer import ImportSession, ImportTask
from beets.library import Item
from beets.autotag.hooks import AlbumInfo
from beets.plugins import BeetsPlugin
if TYPE_CHECKING:
from beets.autotag.hooks import Info
from beets.importer import ImportSession, ImportTask
from beets.library import Item
__author__ = "henryoberholtzer@gmail.com"
__version__ = "1.0"

View file

@ -25,3 +25,14 @@
{% endblock %}
.. rubric:: {{ _('Methods definition') }}
{% if objname in related_typeddicts %}
Related TypedDicts
------------------
{% for typeddict in related_typeddicts[objname] %}
.. autotypeddict:: {{ typeddict }}
:show-inheritance:
{% endfor %}
{% endif %}

View file

@ -6,4 +6,5 @@ API Reference
:titlesonly:
plugins
plugin_utilities
database

View file

@ -0,0 +1,16 @@
Plugin Utilities
================
.. currentmodule:: beetsplug._utils.requests
.. autosummary::
:toctree: generated/
RequestHandler
.. currentmodule:: beetsplug._utils.musicbrainz
.. autosummary::
:toctree: generated/
MusicBrainzAPI

View file

@ -20,23 +20,24 @@ New features:
- :doc:`plugins/ftintitle`: Added argument to skip the processing of artist and
album artist are the same in ftintitle.
- :doc:`plugins/play`: Added `$playlist` marker to precisely edit the playlist
filepath into the command calling the player program.
- :doc:`plugins/lastgenre`: For tuning plugin settings ``-vvv`` can be passed
to receive extra verbose logging around last.fm results and how they are
resolved. The ``extended_debug`` config setting and ``--debug`` option
have been removed.
filepath into the command calling the player program.
- :doc:`plugins/lastgenre`: For tuning plugin settings ``-vvv`` can be passed to
receive extra verbose logging around last.fm results and how they are
resolved. The ``extended_debug`` config setting and ``--debug`` option have
been removed.
- :doc:`plugins/importsource`: Added new plugin that tracks original import
paths and optionally suggests removing source files when items are removed
from the library.
- :doc:`plugins/mbpseudo`: Add a new `mbpseudo` plugin to proactively receive
MusicBrainz pseudo-releases as recommendations during import.
MusicBrainz pseudo-releases as recommendations during import.
- Added support for Python 3.13.
- :doc:`/plugins/convert`: ``force`` can be passed to override checks like
no_convert, never_convert_lossy_files, same format, and max_bitrate
- :doc:`plugins/titlecase`: Add the `titlecase` plugin to allow users to
resolve differences in metadata source styles.
- :doc:`plugins/titlecase`: Add the `titlecase` plugin to allow users to resolve
differences in metadata source styles.
- :doc:`plugins/spotify`: Added support for multi-artist albums and tracks,
saving all contributing artists to the respective fields.
saving all contributing artists to the respective fields.
- :doc:`plugins/fetchart`: Fix colorized output text.
- :doc:`plugins/ftintitle`: Featured artists are now inserted before brackets
containing remix/edit-related keywords (e.g., "Remix", "Live", "Edit") instead
of being appended at the end. This improves formatting for titles like "Song 1
@ -44,6 +45,7 @@ New features:
of brackets are supported and a new ``bracket_keywords`` configuration option
allows customizing the keywords. Setting ``bracket_keywords`` to an empty list
matches any bracket content regardless of keywords.
- :doc:`plugins/discogs`: Added support for multi value fields. :bug:`6068`
- :doc:`plugins/embedart`: Embedded arts can now be cleared during import with
the ``clearart_on_import`` config option. Also, ``beet clearart`` is only
going to update the files matching the query and with an embedded art, leaving
@ -51,6 +53,9 @@ New features:
Bug fixes:
- Handle potential OSError when unlinking temporary files in ArtResizer.
:bug:`5615`
- :doc:`/plugins/spotify`: Updated Spotify API credentials. :bug:`6270`
- :doc:`/plugins/smartplaylist`: Fixed an issue where multiple queries in a
playlist configuration were not preserving their order, causing items to
appear in database order rather than the order specified in the config.
@ -89,12 +94,37 @@ Bug fixes:
name (like "feat.", "+", or "&") prevent it. Using the albumartists list field
and fetching a genre for each artist separately improves the chance of
receiving valid results in that stage.
- :doc:`/plugins/ftintitle`: Fixed artist name splitting to prioritize explicit
featuring tokens (feat, ft, featuring) over generic separators (&, and),
preventing incorrect splits when both are present.
- :doc:`reference/cli`: Fix 'from_scratch' option for singleton imports: delete
all (old) metadata when new metadata is applied. :bug:`3706`
- :doc:`/plugins/convert`: ``auto_keep`` now respects ``no_convert`` and
``never_convert_lossy_files`` when deciding whether to copy/transcode items,
avoiding extra lossy duplicates.
- :doc:`plugins/discogs`: Fixed unexpected flex attr from the Discogs plugin.
:bug:`6177`
For plugin developers:
- A new plugin event, ``album_matched``, is sent when an album that is being
imported has been matched to its metadata and the corresponding distance has
been calculated.
- Added a reusable requests handler which can be used by plugins to make HTTP
requests with built-in retry and backoff logic. It uses beets user-agent and
configures timeouts. See :class:`~beetsplug._utils.requests.RequestHandler`
for documentation.
- Replaced dependency on ``python-musicbrainzngs`` with a lightweight custom
MusicBrainz client implementation and updated relevant plugins accordingly:
- :doc:`plugins/listenbrainz`
- :doc:`plugins/mbcollection`
- :doc:`plugins/mbpseudo`
- :doc:`plugins/missing`
- :doc:`plugins/musicbrainz`
- :doc:`plugins/parentwork`
See :class:`~beetsplug._utils.musicbrainz.MusicBrainzAPI` for documentation.
For packagers:

View file

@ -32,9 +32,22 @@ extensions = [
"sphinx_design",
"sphinx_copybutton",
"conf",
"sphinx_toolbox.more_autodoc.autotypeddict",
]
autosummary_generate = True
autosummary_context = {
"related_typeddicts": {
"MusicBrainzAPI": [
"beetsplug._utils.musicbrainz.LookupKwargs",
"beetsplug._utils.musicbrainz.SearchKwargs",
"beetsplug._utils.musicbrainz.BrowseKwargs",
"beetsplug._utils.musicbrainz.BrowseRecordingsKwargs",
"beetsplug._utils.musicbrainz.BrowseReleaseGroupsKwargs",
],
}
}
autodoc_member_order = "bysource"
exclude_patterns = ["_build"]
templates_path = ["_templates"]
source_suffix = {".rst": "restructuredtext", ".md": "markdown"}

View file

@ -72,10 +72,10 @@ class ConfDomain(Domain):
name = "conf"
label = "Simple Configuration"
object_types = {"conf": ObjType("conf", "conf")}
directives = {"conf": Conf}
roles = {"conf": XRefRole()}
initial_data: dict[str, Any] = {"objects": {}}
object_types = {"conf": ObjType("conf", "conf")} # noqa: RUF012
directives = {"conf": Conf} # noqa: RUF012
roles = {"conf": XRefRole()} # noqa: RUF012
initial_data: dict[str, Any] = {"objects": {}} # noqa: RUF012
def get_objects(self) -> Iterable[tuple[str, str, str, str, str, int]]:
"""Return an iterable of object tuples for the inventory."""

View file

@ -622,7 +622,7 @@ beets-youtube_
.. _beets-setlister: https://github.com/tomjaspers/beets-setlister
.. _beets-usertag: https://github.com/igordertigor/beets-usertag
.. _beets-usertag: https://github.com/edgars-supe/beets-usertag
.. _beets-webm3u: https://github.com/mgoltzsche/beets-webm3u

View file

@ -6,15 +6,16 @@ ListenBrainz Plugin
The ListenBrainz plugin for beets allows you to interact with the ListenBrainz
service.
Installation
------------
Configuration
-------------
To use the ``listenbrainz`` plugin, first enable it in your configuration (see
:ref:`using-plugins`). Then, install ``beets`` with ``listenbrainz`` extra
To enable the ListenBrainz plugin, add the following to your beets configuration
file (config.yaml_):
.. code-block:: bash
.. code-block:: yaml
pip install "beets[listenbrainz]"
plugins:
- listenbrainz
You can then configure the plugin by providing your ListenBrainz token (see
instructions here_) and username:

View file

@ -6,18 +6,9 @@ maintain your `music collection`_ list there.
.. _music collection: https://musicbrainz.org/doc/Collections
Installation
------------
To use the ``mbcollection`` plugin, first enable it in your configuration (see
:ref:`using-plugins`). Then, install ``beets`` with ``mbcollection`` extra
.. code-block:: bash
pip install "beets[mbcollection]"
Then, add your MusicBrainz username and password to your :doc:`configuration
file </reference/config>` under a ``musicbrainz`` section:
To begin, just enable the ``mbcollection`` plugin in your configuration (see
:ref:`using-plugins`). Then, add your MusicBrainz username and password to your
:doc:`configuration file </reference/config>` under a ``musicbrainz`` section:
::

View file

@ -5,16 +5,6 @@ This plugin adds a new command, ``missing`` or ``miss``, which finds and lists
missing tracks for albums in your collection. Each album requires one network
call to album data source.
Installation
------------
To use the ``missing`` plugin, first enable it in your configuration (see
:ref:`using-plugins`). Then, install ``beets`` with ``missing`` extra
.. code-block:: bash
pip install "beets[missing]"
Usage
-----

View file

@ -38,16 +38,6 @@ This plugin adds seven tags:
to keep track of recordings whose works have changed.
- **parentwork_date**: The composition date of the parent work.
Installation
------------
To use the ``parentwork`` plugin, first enable it in your configuration (see
:ref:`using-plugins`). Then, install ``beets`` with ``parentwork`` extra
.. code-block:: bash
pip install "beets[parentwork]"
Configuration
-------------

View file

@ -10,9 +10,9 @@ Installation
------------
This plugin can use one of many backends to compute the ReplayGain values:
GStreamer, mp3gain (and its cousin, aacgain), Python Audio Tools or ffmpeg.
ffmpeg and mp3gain can be easier to install. mp3gain supports less audio formats
than the other backend.
GStreamer, mp3gain (and its cousins, aacgain and mp3rgain), Python Audio Tools
or ffmpeg. ffmpeg and mp3gain can be easier to install. mp3gain supports fewer
audio formats than the other backends.
Once installed, this plugin analyzes all files during the import process. This
can be a slow process; to instead analyze after the fact, disable automatic
@ -51,16 +51,59 @@ configuration file:
The GStreamer backend does not support parallel analysis.
mp3gain and aacgain
~~~~~~~~~~~~~~~~~~~
Supported ``command`` backends
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In order to use this backend, you will need to install the mp3gain_ command-line
tool or the aacgain_ fork thereof. Here are some hints:
In order to use this backend, you will need to install a supported command-line
tool:
- mp3gain_ (MP3 only)
- aacgain_ (MP3, AAC/M4A)
- mp3rgain_ (MP3, AAC/M4A)
mp3gain
+++++++
- On Mac OS X, you can use Homebrew_. Type ``brew install aacgain``.
- On Linux, mp3gain_ is probably in your repositories. On Debian or Ubuntu, for
example, you can run ``apt-get install mp3gain``.
- On Windows, download and install the original mp3gain_.
- On Windows, download and install mp3gain_.
aacgain
+++++++
- On macOS, install via Homebrew_: ``brew install aacgain``.
- For other platforms, download from aacgain_ or use a compatible fork if
available for your system.
mp3rgain
++++++++
mp3rgain_ is a modern Rust rewrite of ``mp3gain`` that also supports AAC/M4A
files. It addresses security vulnerability CVE-2019-18359 present in the
original mp3gain and works on modern systems including Windows 11 and macOS with
Apple Silicon.
- On macOS, install via Homebrew_: ``brew install mp3rgain``.
- On Linux, install via Nix: ``nix-env -iA nixpkgs.mp3rgain`` or from your
distribution packaging (for example, AUR on Arch Linux).
- On Windows, download and install mp3rgain_.
Configuration
+++++++++++++
.. code-block:: yaml
replaygain:
backend: command
command: # mp3rgain, mp3gain, or aacgain
If beets doesn't automatically find the command executable, you can configure
the path explicitly like so:
.. code-block:: yaml
replaygain:
command: /Applications/MacMP3Gain.app/Contents/Resources/aacgain
.. _aacgain: https://aacgain.altosdesign.com
@ -68,21 +111,7 @@ tool or the aacgain_ fork thereof. Here are some hints:
.. _mp3gain: http://mp3gain.sourceforge.net/download.php
Then, enable the plugin (see :ref:`using-plugins`) and specify the "command"
backend in your configuration file:
::
replaygain:
backend: command
If beets doesn't automatically find the ``mp3gain`` or ``aacgain`` executable,
you can configure the path explicitly like so:
::
replaygain:
command: /Applications/MacMP3Gain.app/Contents/Resources/aacgain
.. _mp3rgain: https://github.com/M-Igashi/mp3rgain
Python Audio Tools
~~~~~~~~~~~~~~~~~~
@ -144,10 +173,8 @@ file. The available options are:
These options only work with the "command" backend:
- **command**: The path to the ``mp3gain`` or ``aacgain`` executable (if beets
cannot find it by itself). For example:
``/Applications/MacMP3Gain.app/Contents/Resources/aacgain``. Default: Search
in your ``$PATH``.
- **command**: Name or path to your command backend of choice: either of
``mp3gain``, ``aacgain`` or ``mp3rgain``.
- **noclip**: Reduce the amount of ReplayGain adjustment to whatever amount
would keep clipping from occurring. Default: ``yes``.

418
poetry.lock generated
View file

@ -49,6 +49,42 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""}
[package.extras]
trio = ["trio (>=0.31.0)"]
[[package]]
name = "apeye"
version = "1.4.1"
description = "Handy tools for working with URLs and APIs."
optional = true
python-versions = ">=3.6.1"
files = [
{file = "apeye-1.4.1-py3-none-any.whl", hash = "sha256:44e58a9104ec189bf42e76b3a7fe91e2b2879d96d48e9a77e5e32ff699c9204e"},
{file = "apeye-1.4.1.tar.gz", hash = "sha256:14ea542fad689e3bfdbda2189a354a4908e90aee4bf84c15ab75d68453d76a36"},
]
[package.dependencies]
apeye-core = ">=1.0.0b2"
domdf-python-tools = ">=2.6.0"
platformdirs = ">=2.3.0"
requests = ">=2.24.0"
[package.extras]
all = ["cachecontrol[filecache] (>=0.12.6)", "lockfile (>=0.12.2)"]
limiter = ["cachecontrol[filecache] (>=0.12.6)", "lockfile (>=0.12.2)"]
[[package]]
name = "apeye-core"
version = "1.1.5"
description = "Core (offline) functionality for the apeye library."
optional = true
python-versions = ">=3.6.1"
files = [
{file = "apeye_core-1.1.5-py3-none-any.whl", hash = "sha256:dc27a93f8c9e246b3b238c5ea51edf6115ab2618ef029b9f2d9a190ec8228fbf"},
{file = "apeye_core-1.1.5.tar.gz", hash = "sha256:5de72ed3d00cc9b20fea55e54b7ab8f5ef8500eb33a5368bc162a5585e238a55"},
]
[package.dependencies]
domdf-python-tools = ">=2.6.0"
idna = ">=2.5"
[[package]]
name = "appdirs"
version = "1.4.4"
@ -138,6 +174,20 @@ gi = ["pygobject (>=3.54.2,<4.0.0)"]
mad = ["pymad[mad] (>=0.11.3,<0.12.0)"]
test = ["pytest (>=8.4.2)", "pytest-cov (>=7.0.0)"]
[[package]]
name = "autodocsumm"
version = "0.2.14"
description = "Extended sphinx autodoc including automatic autosummaries"
optional = true
python-versions = ">=3.7"
files = [
{file = "autodocsumm-0.2.14-py3-none-any.whl", hash = "sha256:3bad8717fc5190802c60392a7ab04b9f3c97aa9efa8b3780b3d81d615bfe5dc0"},
{file = "autodocsumm-0.2.14.tar.gz", hash = "sha256:2839a9d4facc3c4eccd306c08695540911042b46eeafcdc3203e6d0bab40bc77"},
]
[package.dependencies]
Sphinx = ">=4.0,<9.0"
[[package]]
name = "babel"
version = "2.17.0"
@ -405,6 +455,27 @@ files = [
[package.dependencies]
cffi = ">=1.0.0"
[[package]]
name = "cachecontrol"
version = "0.14.4"
description = "httplib2 caching for requests"
optional = true
python-versions = ">=3.10"
files = [
{file = "cachecontrol-0.14.4-py3-none-any.whl", hash = "sha256:b7ac014ff72ee199b5f8af1de29d60239954f223e948196fa3d84adaffc71d2b"},
{file = "cachecontrol-0.14.4.tar.gz", hash = "sha256:e6220afafa4c22a47dd0badb319f84475d79108100d04e26e8542ef7d3ab05a1"},
]
[package.dependencies]
filelock = {version = ">=3.8.0", optional = true, markers = "extra == \"filecache\""}
msgpack = ">=0.5.2,<2.0.0"
requests = ">=2.16.0"
[package.extras]
dev = ["cachecontrol[filecache,redis]", "cheroot (>=11.1.2)", "cherrypy", "codespell", "furo", "mypy", "pytest", "pytest-cov", "ruff", "sphinx", "sphinx-copybutton", "types-redis", "types-requests"]
filecache = ["filelock (>=3.8.0)"]
redis = ["redis (>=2.10.5)"]
[[package]]
name = "certifi"
version = "2025.10.5"
@ -795,6 +866,24 @@ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.1
[package.extras]
toml = ["tomli"]
[[package]]
name = "cssutils"
version = "2.11.1"
description = "A CSS Cascading Style Sheets library for Python"
optional = true
python-versions = ">=3.8"
files = [
{file = "cssutils-2.11.1-py3-none-any.whl", hash = "sha256:a67bfdfdff4f3867fab43698ec4897c1a828eca5973f4073321b3bccaf1199b1"},
{file = "cssutils-2.11.1.tar.gz", hash = "sha256:0563a76513b6af6eebbe788c3bf3d01c920e46b3f90c8416738c5cfc773ff8e2"},
]
[package.dependencies]
more-itertools = "*"
[package.extras]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
test = ["cssselect", "importlib-resources", "jaraco.test (>=5.1)", "lxml", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"]
[[package]]
name = "dbus-python"
version = "1.4.0"
@ -820,6 +909,21 @@ files = [
{file = "decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360"},
]
[[package]]
name = "dict2css"
version = "0.3.0.post1"
description = "A μ-library for constructing cascading style sheets from Python dictionaries."
optional = true
python-versions = ">=3.6"
files = [
{file = "dict2css-0.3.0.post1-py3-none-any.whl", hash = "sha256:f006a6b774c3e31869015122ae82c491fd25e7de4a75607a62aa3e798f837e0d"},
{file = "dict2css-0.3.0.post1.tar.gz", hash = "sha256:89c544c21c4ca7472c3fffb9d37d3d926f606329afdb751dc1de67a411b70719"},
]
[package.dependencies]
cssutils = ">=2.2.0"
domdf-python-tools = ">=2.2.0"
[[package]]
name = "docstrfmt"
version = "1.11.1"
@ -860,6 +964,25 @@ files = [
{file = "docutils-0.21.2.tar.gz", hash = "sha256:3a6b18732edf182daa3cd12775bbb338cf5691468f91eeeb109deff6ebfa986f"},
]
[[package]]
name = "domdf-python-tools"
version = "3.10.0"
description = "Helpful functions for Python🐍🛠"
optional = true
python-versions = ">=3.6"
files = [
{file = "domdf_python_tools-3.10.0-py3-none-any.whl", hash = "sha256:5e71c1be71bbcc1f881d690c8984b60e64298ec256903b3147f068bc33090c36"},
{file = "domdf_python_tools-3.10.0.tar.gz", hash = "sha256:2ae308d2f4f1e9145f5f4ba57f840fbfd1c2983ee26e4824347789649d3ae298"},
]
[package.dependencies]
natsort = ">=7.0.1"
typing-extensions = ">=3.7.4.1"
[package.extras]
all = ["pytz (>=2019.1)"]
dates = ["pytz (>=2019.1)"]
[[package]]
name = "exceptiongroup"
version = "1.3.0"
@ -877,6 +1000,17 @@ typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""}
[package.extras]
test = ["pytest (>=6)"]
[[package]]
name = "filelock"
version = "3.20.2"
description = "A platform independent file lock."
optional = true
python-versions = ">=3.10"
files = [
{file = "filelock-3.20.2-py3-none-any.whl", hash = "sha256:fbba7237d6ea277175a32c54bb71ef814a8546d8601269e1bfc388de333974e8"},
{file = "filelock-3.20.2.tar.gz", hash = "sha256:a2241ff4ddde2a7cebddf78e39832509cb045d18ec1a09d7248d6bfc6bfbbe64"},
]
[[package]]
name = "filetype"
version = "1.2.0"
@ -937,6 +1071,27 @@ files = [
{file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
]
[[package]]
name = "html5lib"
version = "1.1"
description = "HTML parser based on the WHATWG HTML specification"
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
files = [
{file = "html5lib-1.1-py2.py3-none-any.whl", hash = "sha256:0d78f8fde1c230e99fe37986a60526d7049ed4bf8a9fadbad5f00e22e58e041d"},
{file = "html5lib-1.1.tar.gz", hash = "sha256:b2e5b40261e20f354d198eae92afc10d750afb487ed5e50f9c4eaf07c184146f"},
]
[package.dependencies]
six = ">=1.9"
webencodings = "*"
[package.extras]
all = ["chardet (>=2.2)", "genshi", "lxml"]
chardet = ["chardet (>=2.2)"]
genshi = ["genshi"]
lxml = ["lxml"]
[[package]]
name = "httpcore"
version = "1.0.9"
@ -1731,6 +1886,17 @@ mutagen = ">=1.46"
[package.extras]
test = ["tox"]
[[package]]
name = "more-itertools"
version = "10.8.0"
description = "More routines for operating on iterables, beyond itertools"
optional = true
python-versions = ">=3.9"
files = [
{file = "more_itertools-10.8.0-py3-none-any.whl", hash = "sha256:52d4362373dcf7c52546bc4af9a86ee7c4579df9a8dc268be0a2f949d376cc9b"},
{file = "more_itertools-10.8.0.tar.gz", hash = "sha256:f638ddf8a1a0d134181275fb5d58b086ead7c6a72429ad725c67503f13ba30bd"},
]
[[package]]
name = "msgpack"
version = "1.1.2"
@ -1818,17 +1984,6 @@ check = ["check-manifest", "flake8", "flake8-black", "isort (>=5.0.3)", "pygment
test = ["coverage[toml] (>=5.2)", "coveralls (>=2.1.1)", "hypothesis", "pyannotate", "pytest", "pytest-cov"]
type = ["mypy", "mypy-extensions"]
[[package]]
name = "musicbrainzngs"
version = "0.7.1"
description = "Python bindings for the MusicBrainz NGS and the Cover Art Archive webservices"
optional = true
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
files = [
{file = "musicbrainzngs-0.7.1-py2.py3-none-any.whl", hash = "sha256:e841a8f975104c0a72290b09f59326050194081a5ae62ee512f41915090e1a10"},
{file = "musicbrainzngs-0.7.1.tar.gz", hash = "sha256:ab1c0100fd0b305852e65f2ed4113c6de12e68afd55186987b8ed97e0f98e627"},
]
[[package]]
name = "mutagen"
version = "1.47.0"
@ -1911,6 +2066,21 @@ files = [
{file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"},
]
[[package]]
name = "natsort"
version = "8.4.0"
description = "Simple yet flexible natural sorting in Python."
optional = true
python-versions = ">=3.7"
files = [
{file = "natsort-8.4.0-py3-none-any.whl", hash = "sha256:4732914fb471f56b5cce04d7bae6f164a592c7712e1c85f9ef585e197299521c"},
{file = "natsort-8.4.0.tar.gz", hash = "sha256:45312c4a0e5507593da193dedd04abb1469253b601ecaf63445ad80f0a1ea581"},
]
[package.extras]
fast = ["fastnumbers (>=2.0.0)"]
icu = ["PyICU (>=1.0.0)"]
[[package]]
name = "numba"
version = "0.62.1"
@ -3303,6 +3473,94 @@ files = [
{file = "roman-5.1.tar.gz", hash = "sha256:3a86572e9bc9183e771769601189e5fa32f1620ffeceebb9eca836affb409986"},
]
[[package]]
name = "ruamel-yaml"
version = "0.18.16"
description = "ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order"
optional = true
python-versions = ">=3.8"
files = [
{file = "ruamel.yaml-0.18.16-py3-none-any.whl", hash = "sha256:048f26d64245bae57a4f9ef6feb5b552a386830ef7a826f235ffb804c59efbba"},
{file = "ruamel.yaml-0.18.16.tar.gz", hash = "sha256:a6e587512f3c998b2225d68aa1f35111c29fad14aed561a26e73fab729ec5e5a"},
]
[package.dependencies]
"ruamel.yaml.clib" = {version = ">=0.2.7", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.14\""}
[package.extras]
docs = ["mercurial (>5.7)", "ryd"]
jinja2 = ["ruamel.yaml.jinja2 (>=0.2)"]
[[package]]
name = "ruamel-yaml-clib"
version = "0.2.15"
description = "C version of reader, parser and emitter for ruamel.yaml derived from libyaml"
optional = true
python-versions = ">=3.9"
files = [
{file = "ruamel_yaml_clib-0.2.15-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:88eea8baf72f0ccf232c22124d122a7f26e8a24110a0273d9bcddcb0f7e1fa03"},
{file = "ruamel_yaml_clib-0.2.15-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9b6f7d74d094d1f3a4e157278da97752f16ee230080ae331fcc219056ca54f77"},
{file = "ruamel_yaml_clib-0.2.15-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:4be366220090d7c3424ac2b71c90d1044ea34fca8c0b88f250064fd06087e614"},
{file = "ruamel_yaml_clib-0.2.15-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1f66f600833af58bea694d5892453f2270695b92200280ee8c625ec5a477eed3"},
{file = "ruamel_yaml_clib-0.2.15-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:da3d6adadcf55a93c214d23941aef4abfd45652110aed6580e814152f385b862"},
{file = "ruamel_yaml_clib-0.2.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e9fde97ecb7bb9c41261c2ce0da10323e9227555c674989f8d9eb7572fc2098d"},
{file = "ruamel_yaml_clib-0.2.15-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:05c70f7f86be6f7bee53794d80050a28ae7e13e4a0087c1839dcdefd68eb36b6"},
{file = "ruamel_yaml_clib-0.2.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6f1d38cbe622039d111b69e9ca945e7e3efebb30ba998867908773183357f3ed"},
{file = "ruamel_yaml_clib-0.2.15-cp310-cp310-win32.whl", hash = "sha256:fe239bdfdae2302e93bd6e8264bd9b71290218fff7084a9db250b55caaccf43f"},
{file = "ruamel_yaml_clib-0.2.15-cp310-cp310-win_amd64.whl", hash = "sha256:468858e5cbde0198337e6a2a78eda8c3fb148bdf4c6498eaf4bc9ba3f8e780bd"},
{file = "ruamel_yaml_clib-0.2.15-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c583229f336682b7212a43d2fa32c30e643d3076178fb9f7a6a14dde85a2d8bd"},
{file = "ruamel_yaml_clib-0.2.15-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56ea19c157ed8c74b6be51b5fa1c3aff6e289a041575f0556f66e5fb848bb137"},
{file = "ruamel_yaml_clib-0.2.15-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5fea0932358e18293407feb921d4f4457db837b67ec1837f87074667449f9401"},
{file = "ruamel_yaml_clib-0.2.15-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ef71831bd61fbdb7aa0399d5c4da06bea37107ab5c79ff884cc07f2450910262"},
{file = "ruamel_yaml_clib-0.2.15-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:617d35dc765715fa86f8c3ccdae1e4229055832c452d4ec20856136acc75053f"},
{file = "ruamel_yaml_clib-0.2.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1b45498cc81a4724a2d42273d6cfc243c0547ad7c6b87b4f774cb7bcc131c98d"},
{file = "ruamel_yaml_clib-0.2.15-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:def5663361f6771b18646620fca12968aae730132e104688766cf8a3b1d65922"},
{file = "ruamel_yaml_clib-0.2.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:014181cdec565c8745b7cbc4de3bf2cc8ced05183d986e6d1200168e5bb59490"},
{file = "ruamel_yaml_clib-0.2.15-cp311-cp311-win32.whl", hash = "sha256:d290eda8f6ada19e1771b54e5706b8f9807e6bb08e873900d5ba114ced13e02c"},
{file = "ruamel_yaml_clib-0.2.15-cp311-cp311-win_amd64.whl", hash = "sha256:bdc06ad71173b915167702f55d0f3f027fc61abd975bd308a0968c02db4a4c3e"},
{file = "ruamel_yaml_clib-0.2.15-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:cb15a2e2a90c8475df45c0949793af1ff413acfb0a716b8b94e488ea95ce7cff"},
{file = "ruamel_yaml_clib-0.2.15-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:64da03cbe93c1e91af133f5bec37fd24d0d4ba2418eaf970d7166b0a26a148a2"},
{file = "ruamel_yaml_clib-0.2.15-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:f6d3655e95a80325b84c4e14c080b2470fe4f33b6846f288379ce36154993fb1"},
{file = "ruamel_yaml_clib-0.2.15-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:71845d377c7a47afc6592aacfea738cc8a7e876d586dfba814501d8c53c1ba60"},
{file = "ruamel_yaml_clib-0.2.15-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11e5499db1ccbc7f4b41f0565e4f799d863ea720e01d3e99fa0b7b5fcd7802c9"},
{file = "ruamel_yaml_clib-0.2.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4b293a37dc97e2b1e8a1aec62792d1e52027087c8eea4fc7b5abd2bdafdd6642"},
{file = "ruamel_yaml_clib-0.2.15-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:512571ad41bba04eac7268fe33f7f4742210ca26a81fe0c75357fa682636c690"},
{file = "ruamel_yaml_clib-0.2.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e5e9f630c73a490b758bf14d859a39f375e6999aea5ddd2e2e9da89b9953486a"},
{file = "ruamel_yaml_clib-0.2.15-cp312-cp312-win32.whl", hash = "sha256:f4421ab780c37210a07d138e56dd4b51f8642187cdfb433eb687fe8c11de0144"},
{file = "ruamel_yaml_clib-0.2.15-cp312-cp312-win_amd64.whl", hash = "sha256:2b216904750889133d9222b7b873c199d48ecbb12912aca78970f84a5aa1a4bc"},
{file = "ruamel_yaml_clib-0.2.15-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4dcec721fddbb62e60c2801ba08c87010bd6b700054a09998c4d09c08147b8fb"},
{file = "ruamel_yaml_clib-0.2.15-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:65f48245279f9bb301d1276f9679b82e4c080a1ae25e679f682ac62446fac471"},
{file = "ruamel_yaml_clib-0.2.15-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:46895c17ead5e22bea5e576f1db7e41cb273e8d062c04a6a49013d9f60996c25"},
{file = "ruamel_yaml_clib-0.2.15-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3eb199178b08956e5be6288ee0b05b2fb0b5c1f309725ad25d9c6ea7e27f962a"},
{file = "ruamel_yaml_clib-0.2.15-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d1032919280ebc04a80e4fb1e93f7a738129857eaec9448310e638c8bccefcf"},
{file = "ruamel_yaml_clib-0.2.15-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ab0df0648d86a7ecbd9c632e8f8d6b21bb21b5fc9d9e095c796cacf32a728d2d"},
{file = "ruamel_yaml_clib-0.2.15-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:331fb180858dd8534f0e61aa243b944f25e73a4dae9962bd44c46d1761126bbf"},
{file = "ruamel_yaml_clib-0.2.15-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fd4c928ddf6bce586285daa6d90680b9c291cfd045fc40aad34e445d57b1bf51"},
{file = "ruamel_yaml_clib-0.2.15-cp313-cp313-win32.whl", hash = "sha256:bf0846d629e160223805db9fe8cc7aec16aaa11a07310c50c8c7164efa440aec"},
{file = "ruamel_yaml_clib-0.2.15-cp313-cp313-win_amd64.whl", hash = "sha256:45702dfbea1420ba3450bb3dd9a80b33f0badd57539c6aac09f42584303e0db6"},
{file = "ruamel_yaml_clib-0.2.15-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:753faf20b3a5906faf1fc50e4ddb8c074cb9b251e00b14c18b28492f933ac8ef"},
{file = "ruamel_yaml_clib-0.2.15-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:480894aee0b29752560a9de46c0e5f84a82602f2bc5c6cde8db9a345319acfdf"},
{file = "ruamel_yaml_clib-0.2.15-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:4d3b58ab2454b4747442ac76fab66739c72b1e2bb9bd173d7694b9f9dbc9c000"},
{file = "ruamel_yaml_clib-0.2.15-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bfd309b316228acecfa30670c3887dcedf9b7a44ea39e2101e75d2654522acd4"},
{file = "ruamel_yaml_clib-0.2.15-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2812ff359ec1f30129b62372e5f22a52936fac13d5d21e70373dbca5d64bb97c"},
{file = "ruamel_yaml_clib-0.2.15-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7e74ea87307303ba91073b63e67f2c667e93f05a8c63079ee5b7a5c8d0d7b043"},
{file = "ruamel_yaml_clib-0.2.15-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:713cd68af9dfbe0bb588e144a61aad8dcc00ef92a82d2e87183ca662d242f524"},
{file = "ruamel_yaml_clib-0.2.15-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:542d77b72786a35563f97069b9379ce762944e67055bea293480f7734b2c7e5e"},
{file = "ruamel_yaml_clib-0.2.15-cp314-cp314-win32.whl", hash = "sha256:424ead8cef3939d690c4b5c85ef5b52155a231ff8b252961b6516ed7cf05f6aa"},
{file = "ruamel_yaml_clib-0.2.15-cp314-cp314-win_amd64.whl", hash = "sha256:ac9b8d5fa4bb7fd2917ab5027f60d4234345fd366fe39aa711d5dca090aa1467"},
{file = "ruamel_yaml_clib-0.2.15-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:923816815974425fbb1f1bf57e85eca6e14d8adc313c66db21c094927ad01815"},
{file = "ruamel_yaml_clib-0.2.15-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dcc7f3162d3711fd5d52e2267e44636e3e566d1e5675a5f0b30e98f2c4af7974"},
{file = "ruamel_yaml_clib-0.2.15-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5d3c9210219cbc0f22706f19b154c9a798ff65a6beeafbf77fc9c057ec806f7d"},
{file = "ruamel_yaml_clib-0.2.15-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1bb7b728fd9f405aa00b4a0b17ba3f3b810d0ccc5f77f7373162e9b5f0ff75d5"},
{file = "ruamel_yaml_clib-0.2.15-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3cb75a3c14f1d6c3c2a94631e362802f70e83e20d1f2b2ef3026c05b415c4900"},
{file = "ruamel_yaml_clib-0.2.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:badd1d7283f3e5894779a6ea8944cc765138b96804496c91812b2829f70e18a7"},
{file = "ruamel_yaml_clib-0.2.15-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:0ba6604bbc3dfcef844631932d06a1a4dcac3fee904efccf582261948431628a"},
{file = "ruamel_yaml_clib-0.2.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a8220fd4c6f98485e97aea65e1df76d4fed1678ede1fe1d0eed2957230d287c4"},
{file = "ruamel_yaml_clib-0.2.15-cp39-cp39-win32.whl", hash = "sha256:04d21dc9c57d9608225da28285900762befbb0165ae48482c15d8d4989d4af14"},
{file = "ruamel_yaml_clib-0.2.15-cp39-cp39-win_amd64.whl", hash = "sha256:27dc656e84396e6d687f97c6e65fb284d100483628f02d95464fd731743a4afe"},
{file = "ruamel_yaml_clib-0.2.15.tar.gz", hash = "sha256:46e4cc8c43ef6a94885f72512094e482114a8a706d3c555a34ed4b0d20200600"},
]
[[package]]
name = "ruff"
version = "0.14.3"
@ -3691,6 +3949,24 @@ docs = ["sphinxcontrib-websupport"]
lint = ["flake8 (>=6.0)", "mypy (==1.11.1)", "pyright (==1.1.384)", "pytest (>=6.0)", "ruff (==0.6.9)", "sphinx-lint (>=0.9)", "tomli (>=2)", "types-Pillow (==10.2.0.20240822)", "types-Pygments (==2.18.0.20240506)", "types-colorama (==0.4.15.20240311)", "types-defusedxml (==0.7.0.20240218)", "types-docutils (==0.21.0.20241005)", "types-requests (==2.32.0.20240914)", "types-urllib3 (==1.26.25.14)"]
test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=8.0)", "setuptools (>=70.0)", "typing_extensions (>=4.9)"]
[[package]]
name = "sphinx-autodoc-typehints"
version = "3.0.1"
description = "Type hints (PEP 484) support for the Sphinx autodoc extension"
optional = true
python-versions = ">=3.10"
files = [
{file = "sphinx_autodoc_typehints-3.0.1-py3-none-any.whl", hash = "sha256:4b64b676a14b5b79cefb6628a6dc8070e320d4963e8ff640a2f3e9390ae9045a"},
{file = "sphinx_autodoc_typehints-3.0.1.tar.gz", hash = "sha256:b9b40dd15dee54f6f810c924f863f9cf1c54f9f3265c495140ea01be7f44fa55"},
]
[package.dependencies]
sphinx = ">=8.1.3"
[package.extras]
docs = ["furo (>=2024.8.6)"]
testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "defusedxml (>=0.7.1)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "sphobjinv (>=2.3.1.2)", "typing-extensions (>=4.12.2)"]
[[package]]
name = "sphinx-copybutton"
version = "0.5.2"
@ -3734,6 +4010,22 @@ theme-pydata = ["pydata-sphinx-theme (>=0.15.2,<0.16.0)"]
theme-rtd = ["sphinx-rtd-theme (>=2.0,<3.0)"]
theme-sbt = ["sphinx-book-theme (>=1.1,<2.0)"]
[[package]]
name = "sphinx-jinja2-compat"
version = "0.4.1"
description = "Patches Jinja2 v3 to restore compatibility with earlier Sphinx versions."
optional = true
python-versions = ">=3.6"
files = [
{file = "sphinx_jinja2_compat-0.4.1-py3-none-any.whl", hash = "sha256:64ca0d46f0d8029fbe69ea612793a55e6ef0113e1bba4a85d402158c09f17a14"},
{file = "sphinx_jinja2_compat-0.4.1.tar.gz", hash = "sha256:0188f0802d42c3da72997533b55a00815659a78d3f81d4b4747b1fb15a5728e6"},
]
[package.dependencies]
jinja2 = ">=2.10"
markupsafe = ">=1"
standard-imghdr = {version = "3.10.14", markers = "python_version >= \"3.13\""}
[[package]]
name = "sphinx-lint"
version = "1.0.1"
@ -3752,6 +4044,80 @@ regex = "*"
[package.extras]
tests = ["pytest", "pytest-cov"]
[[package]]
name = "sphinx-prompt"
version = "1.9.0"
description = "Sphinx directive to add unselectable prompt"
optional = true
python-versions = ">=3.10"
files = [
{file = "sphinx_prompt-1.9.0-py3-none-any.whl", hash = "sha256:fd731446c03f043d1ff6df9f22414495b23067c67011cc21658ea8d36b3575fc"},
{file = "sphinx_prompt-1.9.0.tar.gz", hash = "sha256:471b3c6d466dce780a9b167d9541865fd4e9a80ed46e31b06a52a0529ae995a1"},
]
[package.dependencies]
certifi = "*"
docutils = "*"
idna = "*"
pygments = "*"
Sphinx = ">=8.0.0,<9.0.0"
urllib3 = "*"
[[package]]
name = "sphinx-tabs"
version = "3.4.5"
description = "Tabbed views for Sphinx"
optional = true
python-versions = "~=3.7"
files = [
{file = "sphinx-tabs-3.4.5.tar.gz", hash = "sha256:ba9d0c1e3e37aaadd4b5678449eb08176770e0fc227e769b6ce747df3ceea531"},
{file = "sphinx_tabs-3.4.5-py3-none-any.whl", hash = "sha256:92cc9473e2ecf1828ca3f6617d0efc0aa8acb06b08c56ba29d1413f2f0f6cf09"},
]
[package.dependencies]
docutils = "*"
pygments = "*"
sphinx = "*"
[package.extras]
code-style = ["pre-commit (==2.13.0)"]
testing = ["bs4", "coverage", "pygments", "pytest (>=7.1,<8)", "pytest-cov", "pytest-regressions", "rinohtype"]
[[package]]
name = "sphinx-toolbox"
version = "4.1.1"
description = "Box of handy tools for Sphinx 🧰 📔"
optional = true
python-versions = ">=3.7"
files = [
{file = "sphinx_toolbox-4.1.1-py3-none-any.whl", hash = "sha256:1ee2616091453430ffe41e8371e0ddd22a5c1f504ba2dfb306f50870f3f7672a"},
{file = "sphinx_toolbox-4.1.1.tar.gz", hash = "sha256:1bb1750bf9e1f72a54161b0867caf3b6bf2ee216ecb9f8c519f0a9348824954a"},
]
[package.dependencies]
apeye = ">=0.4.0"
autodocsumm = ">=0.2.0"
beautifulsoup4 = ">=4.9.1"
cachecontrol = {version = ">=0.13.0", extras = ["filecache"]}
dict2css = ">=0.2.3"
docutils = ">=0.16"
domdf-python-tools = ">=2.9.0"
filelock = ">=3.8.0"
html5lib = ">=1.1"
roman = ">4.0"
"ruamel.yaml" = ">=0.16.12,<=0.18.16"
sphinx = ">=3.2.0"
sphinx-autodoc-typehints = ">=1.11.1"
sphinx-jinja2-compat = ">=0.1.0"
sphinx-prompt = ">=1.1.0"
sphinx-tabs = ">=1.2.1,<3.4.7"
tabulate = ">=0.8.7"
typing-extensions = ">=3.7.4.3,<3.10.0.1 || >3.10.0.1"
[package.extras]
all = ["coincidence (>=0.4.3)", "pygments (>=2.7.4,<=2.13.0)"]
testing = ["coincidence (>=0.4.3)", "pygments (>=2.7.4,<=2.13.0)"]
[[package]]
name = "sphinxcontrib-applehelp"
version = "2.0.0"
@ -3872,6 +4238,17 @@ files = [
{file = "standard_chunk-3.13.0.tar.gz", hash = "sha256:4ac345d37d7e686d2755e01836b8d98eda0d1a3ee90375e597ae43aaf064d654"},
]
[[package]]
name = "standard-imghdr"
version = "3.10.14"
description = "Standard library imghdr redistribution. \"dead battery\"."
optional = true
python-versions = "*"
files = [
{file = "standard_imghdr-3.10.14-py3-none-any.whl", hash = "sha256:cdf6883163349624dee9a81d2853a20260337c4cd41c04e99c082e01833a08e2"},
{file = "standard_imghdr-3.10.14.tar.gz", hash = "sha256:2598fe2e7c540dbda34b233295e10957ab8dc8ac6f3bd9eaa8d38be167232e52"},
]
[[package]]
name = "standard-sunau"
version = "3.13.0"
@ -4133,6 +4510,17 @@ h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "webencodings"
version = "0.5.1"
description = "Character encoding aliases for legacy web content"
optional = true
python-versions = "*"
files = [
{file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"},
{file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"},
]
[[package]]
name = "werkzeug"
version = "3.1.3"
@ -4172,7 +4560,7 @@ beatport = ["requests-oauthlib"]
bpd = ["PyGObject"]
chroma = ["pyacoustid"]
discogs = ["python3-discogs-client"]
docs = ["docutils", "pydata-sphinx-theme", "sphinx", "sphinx-copybutton", "sphinx-design"]
docs = ["docutils", "pydata-sphinx-theme", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx-toolbox"]
embedart = ["Pillow"]
embyupdate = ["requests"]
fetchart = ["Pillow", "beautifulsoup4", "langdetect", "requests"]
@ -4180,13 +4568,9 @@ import = ["py7zr", "rarfile"]
kodiupdate = ["requests"]
lastgenre = ["pylast"]
lastimport = ["pylast"]
listenbrainz = ["musicbrainzngs"]
lyrics = ["beautifulsoup4", "langdetect", "requests"]
mbcollection = ["musicbrainzngs"]
metasync = ["dbus-python"]
missing = ["musicbrainzngs"]
mpdstats = ["python-mpd2"]
parentwork = ["musicbrainzngs"]
plexupdate = ["requests"]
reflink = ["reflink"]
replaygain = ["PyGObject"]
@ -4199,4 +4583,4 @@ web = ["flask", "flask-cors"]
[metadata]
lock-version = "2.0"
python-versions = ">=3.10,<4"
content-hash = "45c7dc4ec30f4460a09554d0ec0ebcafebff097386e005e29e12830d16d223dd"
content-hash = "f8ce55ae74c5e3c5d1d330582f83dae30ef963a0b8dd8c8b79f16c3bcfdb525a"

View file

@ -69,7 +69,6 @@ scipy = [ # for librosa
{ python = "<3.13", version = ">=1.13.1", optional = true },
{ python = ">=3.13", version = ">=1.16.1", optional = true },
]
musicbrainzngs = { version = ">=0.4", optional = true }
numba = [ # for librosa
{ python = "<3.13", version = ">=0.60", optional = true },
{ python = ">=3.13", version = ">=0.62.1", optional = true },
@ -94,6 +93,7 @@ pydata-sphinx-theme = { version = "*", optional = true }
sphinx = { version = "*", optional = true }
sphinx-design = { version = ">=0.6.1", optional = true }
sphinx-copybutton = { version = ">=0.5.2", optional = true }
sphinx-toolbox = { version = ">=4.1.0", optional = true }
titlecase = { version = "^2.4.1", optional = true }
[tool.poetry.group.test.dependencies]
@ -117,7 +117,7 @@ titlecase = "^2.4.1"
[tool.poetry.group.lint.dependencies]
docstrfmt = ">=1.11.1"
ruff = ">=0.6.4"
ruff = ">=0.13.0"
sphinx-lint = ">=1.0.0"
[tool.poetry.group.typing.dependencies]
@ -152,6 +152,7 @@ docs = [
"sphinx-lint",
"sphinx-design",
"sphinx-copybutton",
"sphinx-toolbox",
]
discogs = ["python3-discogs-client"]
embedart = ["Pillow"] # ImageMagick
@ -163,13 +164,9 @@ import = ["py7zr", "rarfile"]
kodiupdate = ["requests"]
lastgenre = ["pylast"]
lastimport = ["pylast"]
listenbrainz = ["musicbrainzngs"]
lyrics = ["beautifulsoup4", "langdetect", "requests"]
mbcollection = ["musicbrainzngs"]
metasync = ["dbus-python"]
missing = ["musicbrainzngs"]
mpdstats = ["python-mpd2"]
parentwork = ["musicbrainzngs"]
plexupdate = ["requests"]
reflink = ["reflink"]
replaygain = [
@ -229,7 +226,7 @@ cmd = "make -C docs $COMMANDS"
[tool.poe.tasks.format]
help = "Format the codebase"
cmd = "ruff format"
cmd = "ruff format --config=pyproject.toml"
[tool.poe.tasks.format-docs]
help = "Format the documentation"
@ -237,7 +234,7 @@ cmd = "docstrfmt docs *.rst"
[tool.poe.tasks.lint]
help = "Check the code for linting issues. Accepts ruff options."
cmd = "ruff check"
cmd = "ruff check --config=pyproject.toml"
[tool.poe.tasks.lint-docs]
help = "Lint the documentation"
@ -293,10 +290,11 @@ extend-exclude = [
]
[tool.ruff]
target-version = "py39"
target-version = "py310"
line-length = 80
[tool.ruff.lint]
future-annotations = true
select = [
# "ARG", # flake8-unused-arguments
# "C4", # flake8-comprehensions
@ -308,9 +306,9 @@ select = [
"ISC", # flake8-implicit-str-concat
"N", # pep8-naming
"PT", # flake8-pytest-style
# "RUF", # ruff
"RUF", # ruff
"UP", # pyupgrade
"TCH", # flake8-type-checking
"TC", # flake8-type-checking
"W", # pycodestyle
]
ignore = [
@ -322,6 +320,8 @@ ignore = [
"test/plugins/test_ftintitle.py" = ["E501"]
"test/test_util.py" = ["E501"]
"test/ui/test_field_diff.py" = ["E501"]
"test/util/test_id_extractors.py" = ["E501"]
"test/**" = ["RUF001"] # we use Unicode characters in tests
[tool.ruff.lint.isort]
split-on-trailing-comma = false

View file

@ -337,15 +337,15 @@ class TestDataSourceDistance:
_p("Original", "Original", 0.5, 1.0, True, MATCH, id="match"),
_p("Original", "Other", 0.5, 1.0, True, MISMATCH, id="mismatch"),
_p("Other", "Original", 0.5, 1.0, True, MISMATCH, id="mismatch"),
_p("Original", "unknown", 0.5, 1.0, True, MISMATCH, id="mismatch-unknown"), # noqa: E501
_p("Original", None, 0.5, 1.0, True, MISMATCH, id="mismatch-no-info"), # noqa: E501
_p("Original", "unknown", 0.5, 1.0, True, MISMATCH, id="mismatch-unknown"),
_p("Original", None, 0.5, 1.0, True, MISMATCH, id="mismatch-no-info"),
_p(None, "Other", 0.5, 1.0, True, MISMATCH, id="mismatch-no-original-multiple-sources"), # noqa: E501
_p(None, "Other", 0.5, 1.0, False, MATCH, id="match-no-original-but-single-source"), # noqa: E501
_p("unknown", "unknown", 0.5, 1.0, True, MATCH, id="match-unknown"),
_p("Original", "Other", 1.0, 1.0, True, 0.25, id="mismatch-max-penalty"), # noqa: E501
_p("Original", "Other", 0.5, 5.0, True, 0.3125, id="mismatch-high-weight"), # noqa: E501
_p("Original", "Other", 0.0, 1.0, True, MATCH, id="match-no-penalty"), # noqa: E501
_p("Original", "Other", 0.5, 0.0, True, MATCH, id="match-no-weight"), # noqa: E501
_p("Original", "Other", 1.0, 1.0, True, 0.25, id="mismatch-max-penalty"),
_p("Original", "Other", 0.5, 5.0, True, 0.3125, id="mismatch-high-weight"),
_p("Original", "Other", 0.0, 1.0, True, MATCH, id="match-no-penalty"),
_p("Original", "Other", 0.5, 0.0, True, MATCH, id="match-no-weight"),
],
) # fmt: skip
def test_distance(self, item, info, expected_distance):

22
test/plugins/conftest.py Normal file
View file

@ -0,0 +1,22 @@
from __future__ import annotations
from typing import TYPE_CHECKING
import pytest
import requests
if TYPE_CHECKING:
from requests_mock import Mocker
@pytest.fixture
def requests_mock(requests_mock, monkeypatch) -> Mocker:
    """Override the ``requests_mock`` fixture for MusicBrainz tests.

    Patches ``MusicBrainzAPI.create_session`` so that mocked MB requests
    go through a plain ``requests.Session``, skipping the rate-limiting
    layer and keeping the test suite fast.
    """
    target = "beetsplug._utils.musicbrainz.MusicBrainzAPI.create_session"
    monkeypatch.setattr(target, lambda _self: requests.Session())
    return requests_mock

View file

@ -24,7 +24,7 @@ class LyricsPage(NamedTuple):
artist: str = "The Beatles"
track_title: str = "Lady Madonna"
url_title: str | None = None # only relevant to the Google backend
marks: list[str] = [] # markers for pytest.param
marks: list[str] = [] # markers for pytest.param # noqa: RUF012
def __str__(self) -> str:
"""Return name of this test case."""

View file

@ -14,12 +14,17 @@
"""Tests for the 'albumtypes' plugin."""
from collections.abc import Sequence
from __future__ import annotations
from typing import TYPE_CHECKING
from beets.test.helper import PluginTestCase
from beetsplug.albumtypes import AlbumTypesPlugin
from beetsplug.musicbrainz import VARIOUS_ARTISTS_ID
if TYPE_CHECKING:
from collections.abc import Sequence
class AlbumTypesPluginTest(PluginTestCase):
"""Tests for albumtypes plugin."""

View file

@ -1,13 +1,17 @@
from __future__ import annotations
import os
from http import HTTPStatus
from pathlib import Path
from typing import Any
from typing import TYPE_CHECKING, Any
import pytest
from flask.testing import Client
from beets.test.helper import TestHelper
if TYPE_CHECKING:
from flask.testing import Client
@pytest.fixture(scope="session", autouse=True)
def helper():

View file

@ -22,6 +22,7 @@ import threading
import time
import unittest
from contextlib import contextmanager
from typing import ClassVar
from unittest.mock import MagicMock, patch
import confuse
@ -837,7 +838,7 @@ class BPDQueueTest(BPDTestHelper):
fail=True,
)
METADATA = {"Pos", "Time", "Id", "file", "duration"}
METADATA: ClassVar[set[str]] = {"Pos", "Time", "Id", "file", "duration"}
def test_cmd_add(self):
with self.run_bpd() as client:
@ -1032,7 +1033,7 @@ class BPDConnectionTest(BPDTestHelper):
}
)
ALL_MPD_TAGTYPES = {
ALL_MPD_TAGTYPES: ClassVar[set[str]] = {
"Artist",
"ArtistSort",
"Album",
@ -1057,7 +1058,7 @@ class BPDConnectionTest(BPDTestHelper):
"MUSICBRAINZ_RELEASETRACKID",
"MUSICBRAINZ_WORKID",
}
UNSUPPORTED_TAGTYPES = {
UNSUPPORTED_TAGTYPES: ClassVar[set[str]] = {
"MUSICBRAINZ_WORKID", # not tracked by beets
"Performer", # not tracked by beets
"AlbumSort", # not tracked by beets

View file

@ -11,14 +11,14 @@
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import annotations
import fnmatch
import os.path
import re
import sys
import unittest
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
from mediafile import MediaFile
@ -35,6 +35,9 @@ from beets.test.helper import (
)
from beetsplug import convert
if TYPE_CHECKING:
from pathlib import Path
def shell_quote(text):
import shlex

View file

@ -21,7 +21,19 @@ import pytest
from beets import config
from beets.test._common import Bag
from beets.test.helper import BeetsTestCase, capture_log
from beetsplug.discogs import DiscogsPlugin
from beetsplug.discogs import ArtistState, DiscogsPlugin
def _artist(name: str, **kwargs):
return {
"id": 1,
"name": name,
"join": "",
"role": "",
"anv": "",
"tracks": "",
"resource_url": "",
} | kwargs
@patch("beetsplug.discogs.DiscogsPlugin.setup", Mock())
@ -35,9 +47,7 @@ class DGAlbumInfoTest(BeetsTestCase):
"uri": "https://www.discogs.com/release/release/13633721",
"title": "ALBUM TITLE",
"year": "3001",
"artists": [
{"name": "ARTIST NAME", "id": "ARTIST ID", "join": ","}
],
"artists": [_artist("ARTIST NAME", id="ARTIST ID", join=",")],
"formats": [
{
"descriptions": ["FORMAT DESC 1", "FORMAT DESC 2"],
@ -325,7 +335,7 @@ class DGAlbumInfoTest(BeetsTestCase):
"id": 123,
"uri": "https://www.discogs.com/release/123456-something",
"tracklist": [self._make_track("A", "1", "01:01")],
"artists": [{"name": "ARTIST NAME", "id": 321, "join": ""}],
"artists": [_artist("ARTIST NAME", id=321)],
"title": "TITLE",
}
release = Bag(
@ -385,14 +395,12 @@ class DGAlbumInfoTest(BeetsTestCase):
"position": "A",
"type_": "track",
"duration": "5:44",
"artists": [
{"name": "TEST ARTIST (5)", "tracks": "", "id": 11146}
],
"artists": [_artist("TEST ARTIST (5)", id=11146)],
}
],
"artists": [
{"name": "ARTIST NAME (2)", "id": 321, "join": "&"},
{"name": "OTHER ARTIST (5)", "id": 321, "join": ""},
_artist("ARTIST NAME (2)", id=321, join="&"),
_artist("OTHER ARTIST (5)", id=321),
],
"title": "title",
"labels": [
@ -409,7 +417,12 @@ class DGAlbumInfoTest(BeetsTestCase):
)
d = DiscogsPlugin().get_album_info(release)
assert d.artist == "ARTIST NAME & OTHER ARTIST"
assert d.artists == ["ARTIST NAME", "OTHER ARTIST"]
assert d.artists_ids == ["321", "321"]
assert d.tracks[0].artist == "TEST ARTIST"
assert d.tracks[0].artists == ["TEST ARTIST"]
assert d.tracks[0].artist_id == "11146"
assert d.tracks[0].artists_ids == ["11146"]
assert d.label == "LABEL NAME"
def test_strip_disambiguation_false(self):
@ -424,14 +437,12 @@ class DGAlbumInfoTest(BeetsTestCase):
"position": "A",
"type_": "track",
"duration": "5:44",
"artists": [
{"name": "TEST ARTIST (5)", "tracks": "", "id": 11146}
],
"artists": [_artist("TEST ARTIST (5)", id=11146)],
}
],
"artists": [
{"name": "ARTIST NAME (2)", "id": 321, "join": "&"},
{"name": "OTHER ARTIST (5)", "id": 321, "join": ""},
_artist("ARTIST NAME (2)", id=321, join="&"),
_artist("OTHER ARTIST (5)", id=321),
],
"title": "title",
"labels": [
@ -448,35 +459,62 @@ class DGAlbumInfoTest(BeetsTestCase):
)
d = DiscogsPlugin().get_album_info(release)
assert d.artist == "ARTIST NAME (2) & OTHER ARTIST (5)"
assert d.artists == ["ARTIST NAME (2)", "OTHER ARTIST (5)"]
assert d.tracks[0].artist == "TEST ARTIST (5)"
assert d.tracks[0].artists == ["TEST ARTIST (5)"]
assert d.label == "LABEL NAME (5)"
config["discogs"]["strip_disambiguation"] = True
@pytest.mark.parametrize(
"track_artist_anv,track_artist",
[(False, "ARTIST Feat. PERFORMER"), (True, "VARIATION Feat. VARIATION")],
)
@pytest.mark.parametrize(
"album_artist_anv,album_artist",
[(False, "ARTIST & SOLOIST"), (True, "VARIATION & VARIATION")],
)
@pytest.mark.parametrize(
"artist_credit_anv,track_artist_credit,album_artist_credit",
"track_artist_anv,track_artist,track_artists",
[
(False, "ARTIST Feat. PERFORMER", "ARTIST & SOLOIST"),
(True, "VARIATION Feat. VARIATION", "VARIATION & VARIATION"),
(False, "ARTIST Feat. PERFORMER", ["ARTIST", "PERFORMER"]),
(True, "ART Feat. PERF", ["ART", "PERF"]),
],
)
@pytest.mark.parametrize(
"album_artist_anv,album_artist,album_artists",
[
(False, "DRUMMER, ARTIST & SOLOIST", ["DRUMMER", "ARTIST", "SOLOIST"]),
(True, "DRUM, ARTY & SOLO", ["DRUM", "ARTY", "SOLO"]),
],
)
@pytest.mark.parametrize(
(
"artist_credit_anv,track_artist_credit,"
"track_artists_credit,album_artist_credit,album_artists_credit"
),
[
(
False,
"ARTIST Feat. PERFORMER",
["ARTIST", "PERFORMER"],
"DRUMMER, ARTIST & SOLOIST",
["DRUMMER", "ARTIST", "SOLOIST"],
),
(
True,
"ART Feat. PERF",
["ART", "PERF"],
"DRUM, ARTY & SOLO",
["DRUM", "ARTY", "SOLO"],
),
],
)
@patch("beetsplug.discogs.DiscogsPlugin.setup", Mock())
def test_anv(
track_artist_anv,
track_artist,
track_artists,
album_artist_anv,
album_artist,
album_artists,
artist_credit_anv,
track_artist_credit,
track_artists_credit,
album_artist_credit,
album_artists_credit,
):
"""Test using artist name variations."""
data = {
@ -488,27 +526,21 @@ def test_anv(
"position": "A",
"type_": "track",
"duration": "5:44",
"artists": [
{
"name": "ARTIST",
"tracks": "",
"anv": "VARIATION",
"id": 11146,
}
],
"artists": [_artist("ARTIST", id=11146, anv="ART")],
"extraartists": [
{
"name": "PERFORMER",
"role": "Featuring",
"anv": "VARIATION",
"id": 787,
}
_artist(
"PERFORMER",
id=787,
role="Featuring",
anv="PERF",
)
],
}
],
"artists": [
{"name": "ARTIST (4)", "anv": "VARIATION", "id": 321, "join": "&"},
{"name": "SOLOIST", "anv": "VARIATION", "id": 445, "join": ""},
_artist("DRUMMER", id=445, anv="DRUM", join=", "),
_artist("ARTIST (4)", id=321, anv="ARTY", join="&"),
_artist("SOLOIST", id=445, anv="SOLO"),
],
"title": "title",
}
@ -522,9 +554,53 @@ def test_anv(
config["discogs"]["anv"]["artist_credit"] = artist_credit_anv
r = DiscogsPlugin().get_album_info(release)
assert r.artist == album_artist
assert r.artists == album_artists
assert r.artist_credit == album_artist_credit
assert r.artists_credit == album_artists_credit
assert r.tracks[0].artist == track_artist
assert r.tracks[0].artists == track_artists
assert r.tracks[0].artist_credit == track_artist_credit
assert r.tracks[0].artists_credit == track_artists_credit
@pytest.mark.parametrize("artist_anv", [True, False])
@pytest.mark.parametrize("albumartist_anv", [True, False])
@pytest.mark.parametrize("artistcredit_anv", [True, False])
@patch("beetsplug.discogs.DiscogsPlugin.setup", Mock())
def test_anv_no_variation(artist_anv, albumartist_anv, artistcredit_anv):
    """Plain artist names are used when the release defines no name
    variation, regardless of how the ``anv.*`` options are configured.
    """
    release_data = {
        "id": 123,
        "uri": "https://www.discogs.com/release/123456-something",
        "tracklist": [
            {
                "title": "track",
                "position": "A",
                "type_": "track",
                "duration": "5:44",
                "artists": [_artist("PERFORMER", id=1)],
            }
        ],
        "artists": [_artist("ARTIST", id=2)],
        "title": "title",
    }
    release = Bag(
        data=release_data,
        title=release_data["title"],
        artists=[Bag(data=artist_data) for artist_data in release_data["artists"]],
    )
    config["discogs"]["anv"]["artist"] = artist_anv
    config["discogs"]["anv"]["album_artist"] = albumartist_anv
    config["discogs"]["anv"]["artist_credit"] = artistcredit_anv

    album_info = DiscogsPlugin().get_album_info(release)

    # Album-level fields fall back to the plain artist name.
    assert album_info.artist == "ARTIST"
    assert album_info.artists == ["ARTIST"]
    assert album_info.artist_credit == "ARTIST"
    assert album_info.artists_credit == ["ARTIST"]
    # Track-level fields likewise use the plain performer name.
    first_track = album_info.tracks[0]
    assert first_track.artist == "PERFORMER"
    assert first_track.artists == ["PERFORMER"]
    assert first_track.artist_credit == "PERFORMER"
    assert first_track.artists_credit == ["PERFORMER"]
@patch("beetsplug.discogs.DiscogsPlugin.setup", Mock())
@ -543,9 +619,7 @@ def test_anv_album_artist():
"duration": "5:44",
}
],
"artists": [
{"name": "ARTIST (4)", "anv": "VARIATION", "id": 321},
],
"artists": [_artist("ARTIST (4)", id=321, anv="VARIATION")],
"title": "title",
}
release = Bag(
@ -558,13 +632,18 @@ def test_anv_album_artist():
config["discogs"]["anv"]["artist_credit"] = False
r = DiscogsPlugin().get_album_info(release)
assert r.artist == "ARTIST"
assert r.artists == ["ARTIST"]
assert r.artist_credit == "ARTIST"
assert r.artist_id == "321"
assert r.artists_credit == ["ARTIST"]
assert r.tracks[0].artist == "VARIATION"
assert r.tracks[0].artists == ["VARIATION"]
assert r.tracks[0].artist_credit == "ARTIST"
assert r.tracks[0].artists_credit == ["ARTIST"]
@pytest.mark.parametrize(
"track, expected_artist",
"track, expected_artist, expected_artists",
[
(
{
@ -573,45 +652,32 @@ def test_anv_album_artist():
"position": "1",
"duration": "5:00",
"artists": [
{"name": "NEW ARTIST", "tracks": "", "id": 11146},
{"name": "VOCALIST", "tracks": "", "id": 344, "join": "&"},
_artist("NEW ARTIST", id=11146, join="&"),
_artist("VOCALIST", id=344, join="feat."),
],
"extraartists": [
{
"name": "SOLOIST",
"id": 3,
"role": "Featuring",
},
{
"name": "PERFORMER (1)",
"id": 5,
"role": "Other Role, Featuring",
},
{
"name": "RANDOM",
"id": 8,
"role": "Written-By",
},
{
"name": "MUSICIAN",
"id": 10,
"role": "Featuring [Uncredited]",
},
_artist("SOLOIST", id=3, role="Featuring"),
_artist(
"PERFORMER (1)", id=5, role="Other Role, Featuring"
),
_artist("RANDOM", id=8, role="Written-By"),
_artist("MUSICIAN", id=10, role="Featuring [Uncredited]"),
],
},
"NEW ARTIST, VOCALIST Feat. SOLOIST, PERFORMER, MUSICIAN",
"NEW ARTIST & VOCALIST feat. SOLOIST, PERFORMER, MUSICIAN",
["NEW ARTIST", "VOCALIST", "SOLOIST", "PERFORMER", "MUSICIAN"],
),
],
)
@patch("beetsplug.discogs.DiscogsPlugin.setup", Mock())
def test_parse_featured_artists(track, expected_artist):
def test_parse_featured_artists(track, expected_artist, expected_artists):
"""Tests the plugins ability to parse a featured artist.
Initial check with one featured artist, two featured artists,
and three. Ignores artists that are not listed as featured."""
t = DiscogsPlugin().get_track_info(
track, 1, 1, ("ARTIST", "ARTIST CREDIT", 2)
)
Ignores artists that are not listed as featured."""
plugin = DiscogsPlugin()
artistinfo = ArtistState.from_config(plugin.config, [_artist("ARTIST")])
t, _, _ = plugin.get_track_info(track, 1, 1, artistinfo)
assert t.artist == expected_artist
assert t.artists == expected_artists
@pytest.mark.parametrize(
@ -637,6 +703,32 @@ def test_get_media_and_albumtype(formats, expected_media, expected_albumtype):
assert result == (expected_media, expected_albumtype)
@pytest.mark.parametrize(
    "given_artists,expected_info,config_va_name",
    [
        (
            [_artist("Various")],
            {
                "artist": "VARIOUS ARTISTS",
                "artist_id": "1",
                "artists": ["VARIOUS ARTISTS"],
                "artists_ids": ["1"],
                "artist_credit": "VARIOUS ARTISTS",
                "artists_credit": ["VARIOUS ARTISTS"],
            },
            "VARIOUS ARTISTS",
        )
    ],
)
@patch("beetsplug.discogs.DiscogsPlugin.setup", Mock())
def test_va_buildartistinfo(given_artists, expected_info, config_va_name):
    """A 'Various' release artist is mapped to the configured ``va_name``."""
    config["va_name"] = config_va_name
    artist_state = ArtistState.from_config(DiscogsPlugin().config, given_artists)
    assert artist_state.info == expected_info
@pytest.mark.parametrize(
"position, medium, index, subindex",
[

View file

@ -13,6 +13,7 @@
# included in all copies or substantial portions of the Software.
import codecs
from typing import ClassVar
from unittest.mock import patch
from beets.dbcore.query import TrueQuery
@ -319,7 +320,7 @@ class EditDuringImporterTestCase(
matching = AutotagStub.GOOD
IGNORED = ["added", "album_id", "id", "mtime", "path"]
IGNORED: ClassVar[list[str]] = ["added", "album_id", "id", "mtime", "path"]
def setUp(self):
super().setUp()
@ -350,8 +351,8 @@ class EditDuringImporterNonSingletonTest(EditDuringImporterTestCase):
self.lib.items(),
self.items_orig,
["title"],
self.IGNORED
+ [
[
*self.IGNORED,
"albumartist",
"mb_albumartistid",
"mb_albumartistids",
@ -378,7 +379,7 @@ class EditDuringImporterNonSingletonTest(EditDuringImporterTestCase):
self.lib.items(),
self.items_orig,
[],
self.IGNORED + ["albumartist", "mb_albumartistid"],
[*self.IGNORED, "albumartist", "mb_albumartistid"],
)
assert all("Tag Track" in i.title for i in self.lib.items())
@ -490,6 +491,6 @@ class EditDuringImporterSingletonTest(EditDuringImporterTestCase):
self.lib.items(),
self.items_orig,
["title"],
self.IGNORED + ["albumartist", "mb_albumartistid"],
[*self.IGNORED, "albumartist", "mb_albumartistid"],
)
assert all("Edited Track" in i.title for i in self.lib.items())

View file

@ -98,3 +98,8 @@ class FetchartCliTest(PluginTestCase):
self.run_command("fetchart")
self.album.load()
self.check_cover_is_stored()
    def test_colorization(self):
        """The "no art found" status line is colorized when ui.color is on."""
        self.config["ui"]["color"] = True
        out = self.run_with_output("fetchart")
        # \x1b[1;31m = bold red; \x1b[39;49;00m resets colors/attributes.
        assert " - the älbum: \x1b[1;31mno art found\x1b[39;49;00m\n" == out

View file

@ -14,15 +14,21 @@
"""Tests for the 'ftintitle' plugin."""
from collections.abc import Generator
from typing import TypeAlias
from __future__ import annotations
from typing import TYPE_CHECKING, TypeAlias
import pytest
from beets.library.models import Album, Item
from beets.library.models import Album
from beets.test.helper import PluginTestCase
from beetsplug import ftintitle
if TYPE_CHECKING:
from collections.abc import Generator
from beets.library.models import Item
ConfigValue: TypeAlias = str | bool | list[str]
@ -321,6 +327,10 @@ def test_find_feat_part(
("Alice and Bob", ("Alice", "Bob")),
("Alice With Bob", ("Alice", "Bob")),
("Alice defeat Bob", ("Alice defeat Bob", None)),
("Alice & Bob feat Charlie", ("Alice & Bob", "Charlie")),
("Alice & Bob ft. Charlie", ("Alice & Bob", "Charlie")),
("Alice & Bob featuring Charlie", ("Alice & Bob", "Charlie")),
("Alice and Bob feat Charlie", ("Alice and Bob", "Charlie")),
],
)
def test_split_on_feat(

View file

@ -19,7 +19,7 @@ import os
import sys
import unittest
from contextlib import contextmanager
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, ClassVar
from beets import plugins
from beets.test.helper import PluginTestCase, capture_log
@ -70,7 +70,7 @@ class HookLogsTest(HookTestCase):
class HookCommandTest(HookTestCase):
EVENTS: list[plugins.EventType] = ["write", "after_write"]
EVENTS: ClassVar[list[plugins.EventType]] = ["write", "after_write"]
def setUp(self):
super().setUp()

View file

@ -0,0 +1,47 @@
import pytest
from beets.test.helper import ConfigMixin
from beetsplug.listenbrainz import ListenBrainzPlugin
class TestListenBrainzPlugin(ConfigMixin):
    """Unit tests for ListenBrainzPlugin's MusicBrainz lookups."""

    @pytest.fixture(scope="class")
    def plugin(self) -> ListenBrainzPlugin:
        """Return a plugin instance configured with dummy credentials."""
        self.config["listenbrainz"]["token"] = "test_token"
        self.config["listenbrainz"]["username"] = "test_user"
        return ListenBrainzPlugin()

    @pytest.mark.parametrize(
        "search_response, expected_id",
        [([{"id": "id1"}], "id1"), ([], None)],
        ids=["found", "not_found"],
    )
    def test_get_mb_recording_id(
        self, plugin, search_response, expected_id, requests_mock
    ):
        # The recording search endpoint returns either one match or none.
        requests_mock.get(
            "/ws/2/recording", json={"recordings": search_response}
        )
        listen = {"track_metadata": {"track_name": "S", "release_name": "A"}}
        assert plugin.get_mb_recording_id(listen) == expected_id

    def test_get_track_info(self, plugin, requests_mock):
        recording = {
            "title": "T",
            "artist-credit": [],
            "releases": [{"title": "Al", "date": "2023-01"}],
        }
        requests_mock.get(
            "/ws/2/recording/id1?inc=releases%2Bartist-credits",
            json=recording,
        )
        tracks = plugin.get_track_info([{"identifier": "id1"}])
        # The year is extracted from the release date's leading component.
        assert tracks == [
            {
                "identifier": "id1",
                "title": "T",
                "artist": None,
                "album": "Al",
                "year": "2023",
            }
        ]

View file

@ -14,11 +14,13 @@
"""Tests for the 'lyrics' plugin."""
from __future__ import annotations
import re
import textwrap
from functools import partial
from http import HTTPStatus
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
@ -26,7 +28,12 @@ from beets.library import Item
from beets.test.helper import PluginMixin, TestHelper
from beetsplug import lyrics
from .lyrics_pages import LyricsPage, lyrics_pages
from .lyrics_pages import lyrics_pages
if TYPE_CHECKING:
from pathlib import Path
from .lyrics_pages import LyricsPage
PHRASE_BY_TITLE = {
"Lady Madonna": "friday night arrives without a suitcase",
@ -424,7 +431,7 @@ class TestTekstowoLyrics(LyricsBackendTest):
[
("tekstowopl/piosenka24kgoldncityofangels1", True),
(
"tekstowopl/piosenkabeethovenbeethovenpianosonata17tempestthe3rdmovement", # noqa: E501
"tekstowopl/piosenkabeethovenbeethovenpianosonata17tempestthe3rdmovement",
False,
),
],
@ -607,7 +614,7 @@ class TestTranslation:
[00:00:50]
[00:01.00] Some more synced lyrics / Quelques paroles plus synchronisées
Source: https://lrclib.net/api/123""", # noqa: E501
Source: https://lrclib.net/api/123""",
id="synced",
),
pytest.param(

View file

@ -0,0 +1,142 @@
import re
import uuid
from contextlib import nullcontext as does_not_raise
import pytest
from beets.library import Album
from beets.test.helper import PluginMixin, TestHelper
from beets.ui import UserError
from beetsplug import mbcollection
class TestMbCollectionPlugin(PluginMixin, TestHelper):
    """Tests for the MusicBrainzCollectionPlugin class methods."""

    plugin = "mbcollection"
    # Random but stable collection id shared by the plugin config and the
    # mocked MusicBrainz responses below.
    COLLECTION_ID = str(uuid.uuid4())

    @pytest.fixture(autouse=True)
    def setup_config(self):
        # Credentials and target collection the plugin needs to operate.
        self.config["musicbrainz"]["user"] = "testuser"
        self.config["musicbrainz"]["pass"] = "testpass"
        self.config["mbcollection"]["collection"] = self.COLLECTION_ID

    @pytest.fixture(autouse=True)
    def helper(self):
        # Fresh beets library/config around every test.
        self.setup_beets()
        yield self
        self.teardown_beets()

    @pytest.mark.parametrize(
        "user_collections,expectation",
        [
            (
                [],
                pytest.raises(
                    UserError, match=r"no collections exist for user"
                ),
            ),
            (
                [{"id": "c1", "entity-type": "event"}],
                pytest.raises(UserError, match=r"No release collection found."),
            ),
            (
                [{"id": "c1", "entity-type": "release"}],
                pytest.raises(UserError, match=r"invalid collection ID"),
            ),
            (
                [{"id": COLLECTION_ID, "entity-type": "release"}],
                does_not_raise(),
            ),
        ],
        ids=["no collections", "no release collections", "invalid ID", "valid"],
    )
    def test_get_collection_validation(
        self, requests_mock, user_collections, expectation
    ):
        """Validate the configured collection against the user's collections."""
        requests_mock.get(
            "/ws/2/collection", json={"collections": user_collections}
        )
        with expectation:
            mbcollection.MusicBrainzCollectionPlugin().collection

    def test_mbupdate(self, helper, requests_mock, monkeypatch):
        """Verify mbupdate sync of a MusicBrainz collection with the library.

        This test ensures that the command:

        - fetches collection releases using paginated requests,
        - submits releases that exist locally but are missing from the remote
          collection
        - and removes releases from the remote collection that are not in the
          local library. Small chunk sizes are forced to exercise pagination and
          batching logic.
        """
        for mb_albumid in [
            # already present in remote collection
            "in_collection1",
            "in_collection2",
            # two new albums not in remote collection
            "00000000-0000-0000-0000-000000000001",
            "00000000-0000-0000-0000-000000000002",
        ]:
            helper.lib.add(Album(mb_albumid=mb_albumid))

        # The relevant collection
        requests_mock.get(
            "/ws/2/collection",
            json={
                "collections": [
                    {
                        "id": self.COLLECTION_ID,
                        "entity-type": "release",
                        "release-count": 3,
                    }
                ]
            },
        )
        collection_releases = f"/ws/2/collection/{self.COLLECTION_ID}/releases"

        # Force small fetch chunk to require multiple paged requests.
        monkeypatch.setattr(
            "beetsplug.mbcollection.MBCollection.FETCH_CHUNK_SIZE", 2
        )
        # 3 releases are fetched in two pages.
        requests_mock.get(
            re.compile(rf".*{collection_releases}\b.*&offset=0.*"),
            json={
                "releases": [{"id": "in_collection1"}, {"id": "not_in_library"}]
            },
        )
        requests_mock.get(
            re.compile(rf".*{collection_releases}\b.*&offset=2.*"),
            json={"releases": [{"id": "in_collection2"}]},
        )

        # Force small submission chunk
        monkeypatch.setattr(
            "beetsplug.mbcollection.MBCollection.SUBMISSION_CHUNK_SIZE", 1
        )
        # so that releases are added using two requests
        requests_mock.put(
            re.compile(
                rf".*{collection_releases}/00000000-0000-0000-0000-000000000001"
            )
        )
        requests_mock.put(
            re.compile(
                rf".*{collection_releases}/00000000-0000-0000-0000-000000000002"
            )
        )
        # and finally, one release is removed
        requests_mock.delete(
            re.compile(rf".*{collection_releases}/not_in_library")
        )

        helper.run_command("mbupdate", "--remove")

        # 1 collection lookup + 2 paged fetches + 2 PUTs + 1 DELETE.
        assert requests_mock.call_count == 6

View file

@ -1,6 +1,8 @@
from __future__ import annotations
import json
import pathlib
from copy import deepcopy
from typing import TYPE_CHECKING
import pytest
@ -9,13 +11,17 @@ from beets.autotag.distance import Distance
from beets.autotag.hooks import AlbumInfo, TrackInfo
from beets.library import Item
from beets.test.helper import PluginMixin
from beetsplug._typing import JSONDict
from beetsplug.mbpseudo import (
_STATUS_PSEUDO,
MusicBrainzPseudoReleasePlugin,
PseudoAlbumInfo,
)
if TYPE_CHECKING:
import pathlib
from beetsplug._typing import JSONDict
@pytest.fixture(scope="module")
def rsrc_dir(pytestconfig: pytest.Config):
@ -94,7 +100,7 @@ class TestMBPseudoMixin(PluginMixin):
@pytest.fixture(autouse=True)
def patch_get_release(self, monkeypatch, pseudo_release: JSONDict):
monkeypatch.setattr(
"beetsplug.musicbrainz.MusicBrainzAPI.get_release",
"beetsplug._utils.musicbrainz.MusicBrainzAPI.get_release",
lambda _, album_id: deepcopy(
{pseudo_release["id"]: pseudo_release}[album_id]
),

View file

@ -0,0 +1,61 @@
import uuid
import pytest
from beets.library import Album
from beets.test.helper import PluginMixin, TestHelper
@pytest.fixture
def helper():
    """Yield a TestHelper wrapping a throwaway beets environment."""
    test_helper = TestHelper()
    test_helper.setup_beets()
    yield test_helper
    test_helper.teardown_beets()
class TestMissingAlbums(PluginMixin):
    """Tests for the 'missing' plugin's album-level reporting."""

    plugin = "missing"

    # Class-level library fixture: one album with a random artist id that the
    # mocked release-group query below is keyed on.
    album_in_lib = Album(
        album="Album",
        albumartist="Artist",
        mb_albumartistid=str(uuid.uuid4()),
        mb_albumid="album",
    )

    @pytest.mark.parametrize(
        "release_from_mb,expected_output",
        [
            pytest.param(
                {"id": "other", "title": "Other Album"},
                "Artist - Other Album\n",
                id="missing",
            ),
            pytest.param(
                {"id": album_in_lib.mb_albumid, "title": album_in_lib.album},
                "",
                marks=pytest.mark.xfail(
                    reason=(
                        "Album in lib must not be reported as missing."
                        " Needs fixing."
                    )
                ),
                id="not missing",
            ),
        ],
    )
    def test_missing_artist_albums(
        self, requests_mock, helper, release_from_mb, expected_output
    ):
        """Albums MusicBrainz knows about but the library lacks are printed
        by ``missing --album``.
        """
        helper.lib.add(self.album_in_lib)
        # Mock the artist's release-group listing from MusicBrainz.
        requests_mock.get(
            f"/ws/2/release-group?artist={self.album_in_lib.mb_albumartistid}",
            json={"release-groups": [release_from_mb]},
        )
        with self.configure_plugin({}):
            assert (
                helper.run_with_output("missing", "--album") == expected_output
            )

View file

@ -13,6 +13,7 @@
# included in all copies or substantial portions of the Software.
from typing import Any, ClassVar
from unittest.mock import ANY, Mock, call, patch
from beets import util
@ -46,9 +47,8 @@ class MPDStatsTest(PluginTestCase):
assert mpdstats.get_item("/some/non-existing/path") is None
assert "item not found:" in log.info.call_args[0][0]
FAKE_UNKNOWN_STATE = "some-unknown-one"
STATUSES = [
{"state": FAKE_UNKNOWN_STATE},
STATUSES: ClassVar[list[dict[str, Any]]] = [
{"state": "some-unknown-one"},
{"state": "pause"},
{"state": "play", "songid": 1, "time": "0:1"},
{"state": "stop"},

View file

@ -15,6 +15,7 @@
"""Tests for MusicBrainz API wrapper."""
import unittest
from typing import ClassVar
from unittest import mock
import pytest
@ -29,6 +30,7 @@ class MusicBrainzTestCase(BeetsTestCase):
def setUp(self):
super().setUp()
self.mb = musicbrainz.MusicBrainzPlugin()
self.config["match"]["preferred"]["countries"] = ["US"]
class MBAlbumInfoTest(MusicBrainzTestCase):
@ -80,6 +82,7 @@ class MBAlbumInfoTest(MusicBrainzTestCase):
"country": "COUNTRY",
"status": "STATUS",
"barcode": "BARCODE",
"release-events": [{"area": None, "date": "2021-03-26"}],
}
if multi_artist_credit:
@ -863,7 +866,7 @@ class MBLibraryTest(MusicBrainzTestCase):
]
with mock.patch(
"beetsplug.musicbrainz.MusicBrainzAPI.get_release"
"beetsplug._utils.musicbrainz.MusicBrainzAPI.get_release"
) as gp:
gp.side_effect = side_effect
album = self.mb.album_for_id("d2a6f856-b553-40a0-ac54-a321e8e2da02")
@ -907,7 +910,7 @@ class MBLibraryTest(MusicBrainzTestCase):
]
with mock.patch(
"beetsplug.musicbrainz.MusicBrainzAPI.get_release"
"beetsplug._utils.musicbrainz.MusicBrainzAPI.get_release"
) as gp:
gp.side_effect = side_effect
album = self.mb.album_for_id("d2a6f856-b553-40a0-ac54-a321e8e2da02")
@ -951,7 +954,7 @@ class MBLibraryTest(MusicBrainzTestCase):
]
with mock.patch(
"beetsplug.musicbrainz.MusicBrainzAPI.get_release"
"beetsplug._utils.musicbrainz.MusicBrainzAPI.get_release"
) as gp:
gp.side_effect = side_effect
album = self.mb.album_for_id("d2a6f856-b553-40a0-ac54-a321e8e2da02")
@ -1004,7 +1007,7 @@ class MBLibraryTest(MusicBrainzTestCase):
]
with mock.patch(
"beetsplug.musicbrainz.MusicBrainzAPI.get_release"
"beetsplug._utils.musicbrainz.MusicBrainzAPI.get_release"
) as gp:
gp.side_effect = side_effect
album = self.mb.album_for_id("d2a6f856-b553-40a0-ac54-a321e8e2da02")
@ -1015,7 +1018,11 @@ class TestMusicBrainzPlugin(PluginMixin):
plugin = "musicbrainz"
mbid = "d2a6f856-b553-40a0-ac54-a321e8e2da99"
RECORDING = {"title": "foo", "id": "bar", "length": 42}
RECORDING: ClassVar[dict[str, int | str]] = {
"title": "foo",
"id": "bar",
"length": 42,
}
@pytest.fixture
def plugin_config(self):
@ -1055,7 +1062,7 @@ class TestMusicBrainzPlugin(PluginMixin):
def test_item_candidates(self, monkeypatch, mb):
monkeypatch.setattr(
"beetsplug.musicbrainz.MusicBrainzAPI.get_json",
"beetsplug._utils.musicbrainz.MusicBrainzAPI.get_json",
lambda *_, **__: {"recordings": [self.RECORDING]},
)
@ -1066,11 +1073,11 @@ class TestMusicBrainzPlugin(PluginMixin):
def test_candidates(self, monkeypatch, mb):
monkeypatch.setattr(
"beetsplug.musicbrainz.MusicBrainzAPI.get_json",
"beetsplug._utils.musicbrainz.MusicBrainzAPI.get_json",
lambda *_, **__: {"releases": [{"id": self.mbid}]},
)
monkeypatch.setattr(
"beetsplug.musicbrainz.MusicBrainzAPI.get_release",
"beetsplug._utils.musicbrainz.MusicBrainzAPI.get_release",
lambda *_, **__: {
"title": "hi",
"id": self.mbid,
@ -1099,84 +1106,3 @@ class TestMusicBrainzPlugin(PluginMixin):
assert len(candidates) == 1
assert candidates[0].tracks[0].track_id == self.RECORDING["id"]
assert candidates[0].album == "hi"
def test_group_relations():
raw_release = {
"id": "r1",
"relations": [
{"target-type": "artist", "type": "vocal", "name": "A"},
{"target-type": "url", "type": "streaming", "url": "http://s"},
{"target-type": "url", "type": "purchase", "url": "http://p"},
{
"target-type": "work",
"type": "performance",
"work": {
"relations": [
{
"artist": {"name": "幾田りら"},
"target-type": "artist",
"type": "composer",
},
{
"target-type": "url",
"type": "lyrics",
"url": {
"resource": "https://utaten.com/lyric/tt24121002/"
},
},
{
"artist": {"name": "幾田りら"},
"target-type": "artist",
"type": "lyricist",
},
{
"target-type": "url",
"type": "lyrics",
"url": {
"resource": "https://www.uta-net.com/song/366579/"
},
},
],
"title": "百花繚乱",
"type": "Song",
},
},
],
}
assert musicbrainz.MusicBrainzAPI._group_relations(raw_release) == {
"id": "r1",
"artist-relations": [{"type": "vocal", "name": "A"}],
"url-relations": [
{"type": "streaming", "url": "http://s"},
{"type": "purchase", "url": "http://p"},
],
"work-relations": [
{
"type": "performance",
"work": {
"artist-relations": [
{"type": "composer", "artist": {"name": "幾田りら"}},
{"type": "lyricist", "artist": {"name": "幾田りら"}},
],
"url-relations": [
{
"type": "lyrics",
"url": {
"resource": "https://utaten.com/lyric/tt24121002/"
},
},
{
"type": "lyrics",
"url": {
"resource": "https://www.uta-net.com/song/366579/"
},
},
],
"title": "百花繚乱",
"type": "Song",
},
},
],
}

View file

@ -14,74 +14,10 @@
"""Tests for the 'parentwork' plugin."""
from unittest.mock import patch
import pytest
from beets.library import Item
from beets.test.helper import PluginTestCase
from beetsplug import parentwork
work = {
"work": {
"id": "1",
"title": "work",
"work-relation-list": [
{"type": "parts", "direction": "backward", "work": {"id": "2"}}
],
"artist-relation-list": [
{
"type": "composer",
"artist": {
"name": "random composer",
"sort-name": "composer, random",
},
}
],
}
}
dp_work = {
"work": {
"id": "2",
"title": "directparentwork",
"work-relation-list": [
{"type": "parts", "direction": "backward", "work": {"id": "3"}}
],
"artist-relation-list": [
{
"type": "composer",
"artist": {
"name": "random composer",
"sort-name": "composer, random",
},
}
],
}
}
p_work = {
"work": {
"id": "3",
"title": "parentwork",
"artist-relation-list": [
{
"type": "composer",
"artist": {
"name": "random composer",
"sort-name": "composer, random",
},
}
],
}
}
def mock_workid_response(mbid, includes):
if mbid == "1":
return work
elif mbid == "2":
return dp_work
elif mbid == "3":
return p_work
@pytest.mark.integration_test
@ -134,35 +70,56 @@ class ParentWorkIntegrationTest(PluginTestCase):
item.load()
assert item["mb_parentworkid"] == "XXX"
# test different cases, still with Matthew Passion Ouverture or Mozart
# requiem
def test_direct_parent_work_real(self):
mb_workid = "2e4a3668-458d-3b2a-8be2-0b08e0d8243a"
assert (
"f04b42df-7251-4d86-a5ee-67cfa49580d1"
== parentwork.direct_parent_id(mb_workid)[0]
)
assert (
"45afb3b2-18ac-4187-bc72-beb1b1c194ba"
== parentwork.work_parent_id(mb_workid)[0]
)
class ParentWorkTest(PluginTestCase):
plugin = "parentwork"
def setUp(self):
"""Set up configuration"""
super().setUp()
self.patcher = patch(
"musicbrainzngs.get_work_by_id", side_effect=mock_workid_response
    @pytest.fixture(autouse=True)
    def patch_works(self, requests_mock):
        """Mock MusicBrainz work lookups for a three-level work chain.

        Work 1 is a part of work 2, which is a part of work 3; only work 3
        (the top-level parent work) carries the composer relation.
        """
        requests_mock.get(
            "/ws/2/work/1?inc=work-rels%2Bartist-rels",
            json={
                "id": "1",
                "title": "work",
                "work-relations": [
                    {
                        "type": "parts",
                        "direction": "backward",
                        "work": {"id": "2"},
                    }
                ],
            },
        )
        requests_mock.get(
            "/ws/2/work/2?inc=work-rels%2Bartist-rels",
            json={
                "id": "2",
                "title": "directparentwork",
                "work-relations": [
                    {
                        "type": "parts",
                        "direction": "backward",
                        "work": {"id": "3"},
                    }
                ],
            },
        )
        requests_mock.get(
            "/ws/2/work/3?inc=work-rels%2Bartist-rels",
            json={
                "id": "3",
                "title": "parentwork",
                # No work-relations: work 3 is the root of the chain.
                "artist-relations": [
                    {
                        "type": "composer",
                        "artist": {
                            "name": "random composer",
                            "sort-name": "composer, random",
                        },
                    }
                ],
            },
        )
self.patcher.start()
def tearDown(self):
super().tearDown()
self.patcher.stop()
def test_normal_case(self):
item = Item(path="/file", mb_workid="1", parentwork_workid_current="1")
@ -204,7 +161,3 @@ class ParentWorkTest(PluginTestCase):
item.load()
assert item["mb_parentworkid"] == "XXX"
def test_direct_parent_work(self):
assert "2" == parentwork.direct_parent_id("1")[0]
assert "3" == parentwork.work_parent_id("1")[0]

View file

@ -72,8 +72,8 @@ class RandomTest(TestHelper, unittest.TestCase):
print(f"{i:2d} {'*' * positions.count(i)}")
return self._stats(positions)
mean1, stdev1, median1 = experiment("artist")
mean2, stdev2, median2 = experiment("track")
_, stdev1, median1 = experiment("artist")
_, stdev2, median2 = experiment("track")
assert 0 == pytest.approx(median1, abs=1)
assert len(self.items) // 2 == pytest.approx(median2, abs=1)
assert stdev2 > stdev1

View file

View file

@ -0,0 +1,82 @@
from beetsplug._utils.musicbrainz import MusicBrainzAPI
def test_group_relations():
    """A flat ``relations`` list is regrouped into ``<target-type>-relations``
    keys (artist-/url-/work-relations), recursing into nested works, and the
    ``target-type`` key is dropped from each relation.
    """
    raw_release = {
        "id": "r1",
        "relations": [
            {"target-type": "artist", "type": "vocal", "name": "A"},
            {"target-type": "url", "type": "streaming", "url": "http://s"},
            {"target-type": "url", "type": "purchase", "url": "http://p"},
            {
                "target-type": "work",
                "type": "performance",
                "work": {
                    # Nested work with its own relations list, which must be
                    # grouped the same way as the top-level one.
                    "relations": [
                        {
                            "artist": {"name": "幾田りら"},
                            "target-type": "artist",
                            "type": "composer",
                        },
                        {
                            "target-type": "url",
                            "type": "lyrics",
                            "url": {
                                "resource": "https://utaten.com/lyric/tt24121002/"
                            },
                        },
                        {
                            "artist": {"name": "幾田りら"},
                            "target-type": "artist",
                            "type": "lyricist",
                        },
                        {
                            "target-type": "url",
                            "type": "lyrics",
                            "url": {
                                "resource": "https://www.uta-net.com/song/366579/"
                            },
                        },
                    ],
                    "title": "百花繚乱",
                    "type": "Song",
                },
            },
        ],
    }
    assert MusicBrainzAPI._group_relations(raw_release) == {
        "id": "r1",
        "artist-relations": [{"type": "vocal", "name": "A"}],
        "url-relations": [
            {"type": "streaming", "url": "http://s"},
            {"type": "purchase", "url": "http://p"},
        ],
        "work-relations": [
            {
                "type": "performance",
                "work": {
                    "artist-relations": [
                        {"type": "composer", "artist": {"name": "幾田りら"}},
                        {"type": "lyricist", "artist": {"name": "幾田りら"}},
                    ],
                    "url-relations": [
                        {
                            "type": "lyrics",
                            "url": {
                                "resource": "https://utaten.com/lyric/tt24121002/"
                            },
                        },
                        {
                            "type": "lyrics",
                            "url": {
                                "resource": "https://www.uta-net.com/song/366579/"
                            },
                        },
                    ],
                    "title": "百花繚乱",
                    "type": "Song",
                },
            },
        ],
    }

Some files were not shown because too many files have changed in this diff Show more