diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
index c6ec4cb5f..14b50859f 100644
--- a/.git-blame-ignore-revs
+++ b/.git-blame-ignore-revs
@@ -57,3 +57,19 @@ c490ac5810b70f3cf5fd8649669838e8fdb19f4d
769dcdc88a1263638ae25944ba6b2be3e8933666
# Reformat all docs using docstrfmt
ab5acaabb3cd24c482adb7fa4800c89fd6a2f08d
+# Replace format calls with f-strings
+4a361bd501e85de12c91c2474c423559ca672852
+# Replace percent formatting
+9352a79e4108bd67f7e40b1e944c01e0a7353272
+# Replace string concatenation (' + ')
+1c16b2b3087e9c3635d68d41c9541c4319d0bdbe
+# Do not use backslashes to deal with long strings
+2fccf64efe82851861e195b521b14680b480a42a
+# Do not use explicit indices for logging args when not needed
+d93ddf8dd43e4f9ed072a03829e287c78d2570a2
+# Moved dev docs
+07549ed896d9649562d40b75cd30702e6fa6e975
+# Moved plugin docs Further Reading chapter
+33f1a5d0bef8ca08be79ee7a0d02a018d502680d
+# Moved art.py utility module from beets into beetsplug
+28aee0fde463f1e18dfdba1994e2bdb80833722f
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 000000000..bb888d520
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1,5 @@
+# assign the entire repo to the maintainers team
+* @beetbox/maintainers
+
+# Specific ownerships:
+/beets/metadata_plugins.py @semohr
diff --git a/.github/workflows/changelog_reminder.yaml b/.github/workflows/changelog_reminder.yaml
index a9c26c1f5..380d89996 100644
--- a/.github/workflows/changelog_reminder.yaml
+++ b/.github/workflows/changelog_reminder.yaml
@@ -10,7 +10,7 @@ jobs:
check_changes:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- name: Get all updated Python files
id: changed-python-files
diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 80826f468..e8a532956 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -20,17 +20,17 @@ jobs:
fail-fast: false
matrix:
platform: [ubuntu-latest, windows-latest]
- python-version: ["3.9", "3.10", "3.11", "3.12"]
+ python-version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
runs-on: ${{ matrix.platform }}
env:
IS_MAIN_PYTHON: ${{ matrix.python-version == '3.9' && matrix.platform == 'ubuntu-latest' }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- name: Setup Python with poetry caching
# poetry cache requires poetry to already be installed, weirdly
- uses: actions/setup-python@v5
+ uses: actions/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
cache: poetry
@@ -39,7 +39,15 @@ jobs:
if: matrix.platform == 'ubuntu-latest'
run: |
sudo apt update
- sudo apt install --yes --no-install-recommends ffmpeg gobject-introspection gstreamer1.0-plugins-base python3-gst-1.0 libcairo2-dev libgirepository-2.0-dev pandoc imagemagick
+ sudo apt install --yes --no-install-recommends \
+ ffmpeg \
+ gobject-introspection \
+ gstreamer1.0-plugins-base \
+ python3-gst-1.0 \
+ libcairo2-dev \
+ libgirepository-2.0-dev \
+ pandoc \
+ imagemagick
- name: Get changed lyrics files
id: lyrics-update
@@ -90,10 +98,10 @@ jobs:
permissions:
id-token: write
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- name: Get the coverage report
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v5
with:
name: coverage-report
diff --git a/.github/workflows/integration_test.yaml b/.github/workflows/integration_test.yaml
index f88864c48..8c7e44d7a 100644
--- a/.github/workflows/integration_test.yaml
+++ b/.github/workflows/integration_test.yaml
@@ -7,10 +7,10 @@ jobs:
test_integration:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- - uses: actions/setup-python@v5
+ - uses: actions/setup-python@v6
with:
python-version: 3.9
cache: poetry
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml
index 0048a8f6e..dcc5d0f12 100644
--- a/.github/workflows/lint.yml
+++ b/.github/workflows/lint.yml
@@ -24,7 +24,7 @@ jobs:
changed_doc_files: ${{ steps.changed-doc-files.outputs.all_changed_files }}
changed_python_files: ${{ steps.changed-python-files.outputs.all_changed_files }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- name: Get changed docs files
id: changed-doc-files
uses: tj-actions/changed-files@v46
@@ -56,10 +56,10 @@ jobs:
name: Check formatting
needs: changed-files
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- - uses: actions/setup-python@v5
+ - uses: actions/setup-python@v6
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: poetry
@@ -77,10 +77,10 @@ jobs:
name: Check linting
needs: changed-files
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- - uses: actions/setup-python@v5
+ - uses: actions/setup-python@v6
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: poetry
@@ -97,10 +97,10 @@ jobs:
name: Check types with mypy
needs: changed-files
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- - uses: actions/setup-python@v5
+ - uses: actions/setup-python@v6
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: poetry
@@ -120,10 +120,10 @@ jobs:
name: Check docs
needs: changed-files
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- - uses: actions/setup-python@v5
+ - uses: actions/setup-python@v6
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: poetry
@@ -143,4 +143,4 @@ jobs:
run: poe lint-docs
- name: Build docs
- run: poe docs -e 'SPHINXOPTS=--fail-on-warning --keep-going'
+ run: poe docs -- -e 'SPHINXOPTS=--fail-on-warning --keep-going'
diff --git a/.github/workflows/make_release.yaml b/.github/workflows/make_release.yaml
index b18dded8d..5a8abe5bb 100644
--- a/.github/workflows/make_release.yaml
+++ b/.github/workflows/make_release.yaml
@@ -17,10 +17,10 @@ jobs:
name: Bump version, commit and create tag
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- - uses: actions/setup-python@v5
+ - uses: actions/setup-python@v6
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: poetry
@@ -45,13 +45,13 @@ jobs:
outputs:
changelog: ${{ steps.generate_changelog.outputs.changelog }}
steps:
- - uses: actions/checkout@v4
+ - uses: actions/checkout@v5
with:
ref: ${{ env.NEW_TAG }}
- name: Install Python tools
uses: BrandonLWhite/pipx-install-action@v1.0.3
- - uses: actions/setup-python@v5
+ - uses: actions/setup-python@v6
with:
python-version: ${{ env.PYTHON_VERSION }}
cache: poetry
@@ -92,7 +92,7 @@ jobs:
id-token: write
steps:
- name: Download all the dists
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v5
with:
name: python-package-distributions
path: dist/
@@ -107,7 +107,7 @@ jobs:
CHANGELOG: ${{ needs.build.outputs.changelog }}
steps:
- name: Download all the dists
- uses: actions/download-artifact@v4
+ uses: actions/download-artifact@v5
with:
name: python-package-distributions
path: dist/
diff --git a/.gitignore b/.gitignore
index 90ef7387d..138965b22 100644
--- a/.gitignore
+++ b/.gitignore
@@ -94,3 +94,6 @@ ENV/
# pyright
pyrightconfig.json
+
+# Pyrefly
+pyrefly.toml
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 92375b465..d19a376b3 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -180,8 +180,7 @@ Your First Contribution
If this is your first time contributing to an open source project, welcome! If
you are confused at all about how to contribute or what to contribute, take a
look at `this great tutorial `__, or stop by our
-`discussion board `__ if you have
-any questions.
+`discussion board`_ if you have any questions.
We maintain a list of issues we reserved for those new to open source labeled
`first timers only`_. Since the goal of these issues is to get users comfortable
@@ -216,6 +215,15 @@ will ship in no time.
Remember, code contributions have four parts: the code, the tests, the
documentation, and the changelog entry. Thank you for contributing!
+.. admonition:: Ownership
+
+ If you are the owner of a plugin, please consider reviewing pull requests
+ that affect your plugin. If you are not the owner of a plugin, please
+ consider becoming one! You can do so by adding an entry to
+ ``.github/CODEOWNERS``. This way, you will automatically receive a review
+ request for pull requests that adjust the code that you own. If you have any
+ questions, please ask on our `discussion board`_.
+
The Code
--------
@@ -238,25 +246,22 @@ There are a few coding conventions we use in beets:
.. code-block:: python
with g.lib.transaction() as tx:
- rows = tx.query(
- "SELECT DISTINCT '{0}' FROM '{1}' ORDER BY '{2}'".format(
- field, model._table, sort_field
- )
- )
+ rows = tx.query(f"SELECT DISTINCT {field} FROM {model._table} ORDER BY {sort_field}")
To fetch Item objects from the database, use lib.items(…) and supply a query
as an argument. Resist the urge to write raw SQL for your query. If you must
- use lower-level queries into the database, do this:
+ use lower-level queries into the database, do this, for example:
.. code-block:: python
with lib.transaction() as tx:
- rows = tx.query("SELECT …")
+ rows = tx.query("SELECT path FROM items WHERE album_id = ?", (album_id,))
Transaction objects help control concurrent access to the database and assist
in debugging conflicting accesses.
-- ``str.format()`` should be used instead of the ``%`` operator
+- f-strings should be used instead of the ``%`` operator and ``str.format()``
+ calls.
- Never ``print`` informational messages; use the `logging
`__ module instead. In
particular, we have our own logging shim, so you’ll see ``from beets import
@@ -264,7 +269,7 @@ There are a few coding conventions we use in beets:
- The loggers use `str.format
`__-style logging
- instead of ``%``-style, so you can type ``log.debug("{0}", obj)`` to do your
+ instead of ``%``-style, so you can type ``log.debug("{}", obj)`` to do your
formatting.
- Exception handlers must use ``except A as B:`` instead of ``except A, B:``.
@@ -281,31 +286,6 @@ according to the specifications required by the project.
Similarly, run ``poe format-docs`` and ``poe lint-docs`` to ensure consistent
documentation formatting and check for any issues.
-Handling Paths
-~~~~~~~~~~~~~~
-
-A great deal of convention deals with the handling of **paths**. Paths are
-stored internally—in the database, for instance—as byte strings (i.e., ``bytes``
-instead of ``str`` in Python 3). This is because POSIX operating systems’ path
-names are only reliably usable as byte strings—operating systems typically
-recommend but do not require that filenames use a given encoding, so violations
-of any reported encoding are inevitable. On Windows, the strings are always
-encoded with UTF-8; on Unix, the encoding is controlled by the filesystem. Here
-are some guidelines to follow:
-
-- If you have a Unicode path or you’re not sure whether something is Unicode or
- not, pass it through ``bytestring_path`` function in the ``beets.util`` module
- to convert it to bytes.
-- Pass every path name through the ``syspath`` function (also in ``beets.util``)
- before sending it to any *operating system* file operation (``open``, for
- example). This is necessary to use long filenames (which, maddeningly, must be
- Unicode) on Windows. This allows us to consistently store bytes in the
- database but use the native encoding rule on both POSIX and Windows.
-- Similarly, the ``displayable_path`` utility function converts bytestring paths
- to a Unicode string for displaying to the user. Every time you want to print
- out a string to the terminal or log it with the ``logging`` module, feed it
- through this function.
-
Editor Settings
~~~~~~~~~~~~~~~
@@ -397,6 +377,8 @@ This way, the test will be run only in the integration test suite.
.. _codecov: https://codecov.io/github/beetbox/beets
+.. _discussion board: https://github.com/beetbox/beets/discussions
+
.. _documentation: https://beets.readthedocs.io/en/stable/
.. _https://github.com/beetbox/beets/blob/master/test/test_template.py#l224: https://github.com/beetbox/beets/blob/master/test/test_template.py#L224
diff --git a/README.rst b/README.rst
index e8cec8ce9..3d5a84712 100644
--- a/README.rst
+++ b/README.rst
@@ -17,7 +17,7 @@ Beets is the media library management system for obsessive music geeks.
The purpose of beets is to get your music collection right once and for all. It
catalogs your collection, automatically improving its metadata as it goes. It
-then provides a bouquet of tools for manipulating and accessing your music.
+then provides a suite of tools for manipulating and accessing your music.
Here's an example of beets' brainy tag corrector doing its thing:
diff --git a/beets/__init__.py b/beets/__init__.py
index 8be305202..d448d8c49 100644
--- a/beets/__init__.py
+++ b/beets/__init__.py
@@ -17,10 +17,25 @@ from sys import stderr
import confuse
-__version__ = "2.3.1"
+from .util import deprecate_imports
+
+__version__ = "2.5.1"
__author__ = "Adrian Sampson "
+def __getattr__(name: str):
+ """Handle deprecated imports."""
+ return deprecate_imports(
+ old_module=__name__,
+ new_module_by_name={
+ "art": "beetsplug._utils",
+ "vfs": "beetsplug._utils",
+ },
+ name=name,
+ version="3.0.0",
+ )
+
+
class IncludeLazyConfig(confuse.LazyConfig):
"""A version of Confuse's LazyConfig that also merges in data from
YAML files specified in an `include` setting.
@@ -35,7 +50,7 @@ class IncludeLazyConfig(confuse.LazyConfig):
except confuse.NotFoundError:
pass
except confuse.ConfigReadError as err:
- stderr.write("configuration `import` failed: {}".format(err.reason))
+ stderr.write(f"configuration `import` failed: {err.reason}")
config = IncludeLazyConfig("beets", __name__)
diff --git a/beets/autotag/__init__.py b/beets/autotag/__init__.py
index 4d107b3a1..319f7f522 100644
--- a/beets/autotag/__init__.py
+++ b/beets/autotag/__init__.py
@@ -261,7 +261,7 @@ def apply_metadata(album_info: AlbumInfo, mapping: Mapping[Item, TrackInfo]):
continue
for suffix in "year", "month", "day":
- key = prefix + suffix
+ key = f"{prefix}{suffix}"
value = getattr(album_info, key) or 0
# If we don't even have a year, apply nothing.
diff --git a/beets/autotag/distance.py b/beets/autotag/distance.py
index 39d16858f..37c6f84f4 100644
--- a/beets/autotag/distance.py
+++ b/beets/autotag/distance.py
@@ -78,10 +78,10 @@ def string_dist(str1: str | None, str2: str | None) -> float:
# example, "the something" should be considered equal to
# "something, the".
for word in SD_END_WORDS:
- if str1.endswith(", %s" % word):
- str1 = "{} {}".format(word, str1[: -len(word) - 2])
- if str2.endswith(", %s" % word):
- str2 = "{} {}".format(word, str2[: -len(word) - 2])
+ if str1.endswith(f", {word}"):
+ str1 = f"{word} {str1[: -len(word) - 2]}"
+ if str2.endswith(f", {word}"):
+ str2 = f"{word} {str2[: -len(word) - 2]}"
# Perform a couple of basic normalizing substitutions.
for pat, repl in SD_REPLACE:
@@ -230,7 +230,7 @@ class Distance:
"""Adds all the distance penalties from `dist`."""
if not isinstance(dist, Distance):
raise ValueError(
- "`dist` must be a Distance object, not {}".format(type(dist))
+ f"`dist` must be a Distance object, not {type(dist)}"
)
for key, penalties in dist._penalties.items():
self._penalties.setdefault(key, []).extend(penalties)
@@ -345,6 +345,12 @@ class Distance:
dist = string_dist(str1, str2)
self.add(key, dist)
+ def add_data_source(self, before: str | None, after: str | None) -> None:
+ if before != after and (
+ before or len(metadata_plugins.find_metadata_source_plugins()) > 1
+ ):
+ self.add("data_source", metadata_plugins.get_penalty(after))
+
@cache
def get_track_length_grace() -> float:
@@ -408,8 +414,7 @@ def track_distance(
if track_info.medium and item.disc:
dist.add_expr("medium", item.disc != track_info.medium)
- # Plugins.
- dist.update(metadata_plugins.track_distance(item, track_info))
+ dist.add_data_source(item.get("data_source"), track_info.data_source)
return dist
@@ -444,7 +449,7 @@ def distance(
# Preferred media options.
media_patterns: Sequence[str] = preferred_config["media"].as_str_seq()
options = [
- re.compile(r"(\d+x)?(%s)" % pat, re.I) for pat in media_patterns
+ re.compile(rf"(\d+x)?({pat})", re.I) for pat in media_patterns
]
if options:
dist.add_priority("media", album_info.media, options)
@@ -525,7 +530,6 @@ def distance(
for _ in range(len(items) - len(mapping)):
dist.add("unmatched_tracks", 1.0)
- # Plugins.
- dist.update(metadata_plugins.album_distance(items, album_info, mapping))
+ dist.add_data_source(likelies["data_source"], album_info.data_source)
return dist
diff --git a/beets/autotag/hooks.py b/beets/autotag/hooks.py
index 7cd215fc4..b809609ea 100644
--- a/beets/autotag/hooks.py
+++ b/beets/autotag/hooks.py
@@ -16,236 +16,201 @@
from __future__ import annotations
+from copy import deepcopy
from typing import TYPE_CHECKING, Any, NamedTuple, TypeVar
-from beets import logging
+from typing_extensions import Self
if TYPE_CHECKING:
from beets.library import Item
from .distance import Distance
-log = logging.getLogger("beets")
-
V = TypeVar("V")
# Classes used to represent candidate options.
class AttrDict(dict[str, V]):
- """A dictionary that supports attribute ("dot") access, so `d.field`
- is equivalent to `d['field']`.
- """
+ """Mapping enabling attribute-style access to stored metadata values."""
+
+ def copy(self) -> Self:
+ return deepcopy(self)
def __getattr__(self, attr: str) -> V:
if attr in self:
return self[attr]
- else:
- raise AttributeError
- def __setattr__(self, key: str, value: V):
+ raise AttributeError(
+ f"'{self.__class__.__name__}' object has no attribute '{attr}'"
+ )
+
+ def __setattr__(self, key: str, value: V) -> None:
self.__setitem__(key, value)
- def __hash__(self):
+ def __hash__(self) -> int: # type: ignore[override]
return id(self)
-class AlbumInfo(AttrDict[Any]):
- """Describes a canonical release that may be used to match a release
- in the library. Consists of these data members:
+class Info(AttrDict[Any]):
+ """Container for metadata about a musical entity."""
- - ``album``: the release title
- - ``album_id``: MusicBrainz ID; UUID fragment only
- - ``artist``: name of the release's primary artist
- - ``artist_id``
- - ``tracks``: list of TrackInfo objects making up the release
+ def __init__(
+ self,
+ album: str | None = None,
+ artist_credit: str | None = None,
+ artist_id: str | None = None,
+ artist: str | None = None,
+ artists_credit: list[str] | None = None,
+ artists_ids: list[str] | None = None,
+ artists: list[str] | None = None,
+ artist_sort: str | None = None,
+ artists_sort: list[str] | None = None,
+ data_source: str | None = None,
+ data_url: str | None = None,
+ genre: str | None = None,
+ media: str | None = None,
+ **kwargs,
+ ) -> None:
+ self.album = album
+ self.artist = artist
+ self.artist_credit = artist_credit
+ self.artist_id = artist_id
+ self.artists = artists or []
+ self.artists_credit = artists_credit or []
+ self.artists_ids = artists_ids or []
+ self.artist_sort = artist_sort
+ self.artists_sort = artists_sort or []
+ self.data_source = data_source
+ self.data_url = data_url
+ self.genre = genre
+ self.media = media
+ self.update(kwargs)
- ``mediums`` along with the fields up through ``tracks`` are required.
- The others are optional and may be None.
+
+class AlbumInfo(Info):
+ """Metadata snapshot representing a single album candidate.
+
+ Aggregates track entries and album-wide context gathered from an external
+ provider. Used during matching to evaluate similarity against a group of
+ user items, and later to drive tagging decisions once selected.
"""
- # TYPING: are all of these correct? I've assumed optional strings
def __init__(
self,
tracks: list[TrackInfo],
- album: str | None = None,
+ *,
album_id: str | None = None,
- artist: str | None = None,
- artist_id: str | None = None,
- artists: list[str] | None = None,
- artists_ids: list[str] | None = None,
- asin: str | None = None,
+ albumdisambig: str | None = None,
+ albumstatus: str | None = None,
albumtype: str | None = None,
albumtypes: list[str] | None = None,
+ asin: str | None = None,
+ barcode: str | None = None,
+ catalognum: str | None = None,
+ country: str | None = None,
+ day: int | None = None,
+ discogs_albumid: str | None = None,
+ discogs_artistid: str | None = None,
+ discogs_labelid: str | None = None,
+ label: str | None = None,
+ language: str | None = None,
+ mediums: int | None = None,
+ month: int | None = None,
+ original_day: int | None = None,
+ original_month: int | None = None,
+ original_year: int | None = None,
+ release_group_title: str | None = None,
+ releasegroup_id: str | None = None,
+ releasegroupdisambig: str | None = None,
+ script: str | None = None,
+ style: str | None = None,
va: bool = False,
year: int | None = None,
- month: int | None = None,
- day: int | None = None,
- label: str | None = None,
- barcode: str | None = None,
- mediums: int | None = None,
- artist_sort: str | None = None,
- artists_sort: list[str] | None = None,
- releasegroup_id: str | None = None,
- release_group_title: str | None = None,
- catalognum: str | None = None,
- script: str | None = None,
- language: str | None = None,
- country: str | None = None,
- style: str | None = None,
- genre: str | None = None,
- albumstatus: str | None = None,
- media: str | None = None,
- albumdisambig: str | None = None,
- releasegroupdisambig: str | None = None,
- artist_credit: str | None = None,
- artists_credit: list[str] | None = None,
- original_year: int | None = None,
- original_month: int | None = None,
- original_day: int | None = None,
- data_source: str | None = None,
- data_url: str | None = None,
- discogs_albumid: str | None = None,
- discogs_labelid: str | None = None,
- discogs_artistid: str | None = None,
**kwargs,
- ):
- self.album = album
- self.album_id = album_id
- self.artist = artist
- self.artist_id = artist_id
- self.artists = artists or []
- self.artists_ids = artists_ids or []
+ ) -> None:
self.tracks = tracks
- self.asin = asin
+ self.album_id = album_id
+ self.albumdisambig = albumdisambig
+ self.albumstatus = albumstatus
self.albumtype = albumtype
self.albumtypes = albumtypes or []
+ self.asin = asin
+ self.barcode = barcode
+ self.catalognum = catalognum
+ self.country = country
+ self.day = day
+ self.discogs_albumid = discogs_albumid
+ self.discogs_artistid = discogs_artistid
+ self.discogs_labelid = discogs_labelid
+ self.label = label
+ self.language = language
+ self.mediums = mediums
+ self.month = month
+ self.original_day = original_day
+ self.original_month = original_month
+ self.original_year = original_year
+ self.release_group_title = release_group_title
+ self.releasegroup_id = releasegroup_id
+ self.releasegroupdisambig = releasegroupdisambig
+ self.script = script
+ self.style = style
self.va = va
self.year = year
- self.month = month
- self.day = day
- self.label = label
- self.barcode = barcode
- self.mediums = mediums
- self.artist_sort = artist_sort
- self.artists_sort = artists_sort or []
- self.releasegroup_id = releasegroup_id
- self.release_group_title = release_group_title
- self.catalognum = catalognum
- self.script = script
- self.language = language
- self.country = country
- self.style = style
- self.genre = genre
- self.albumstatus = albumstatus
- self.media = media
- self.albumdisambig = albumdisambig
- self.releasegroupdisambig = releasegroupdisambig
- self.artist_credit = artist_credit
- self.artists_credit = artists_credit or []
- self.original_year = original_year
- self.original_month = original_month
- self.original_day = original_day
- self.data_source = data_source
- self.data_url = data_url
- self.discogs_albumid = discogs_albumid
- self.discogs_labelid = discogs_labelid
- self.discogs_artistid = discogs_artistid
- self.update(kwargs)
-
- def copy(self) -> AlbumInfo:
- dupe = AlbumInfo([])
- dupe.update(self)
- dupe.tracks = [track.copy() for track in self.tracks]
- return dupe
+ super().__init__(**kwargs)
-class TrackInfo(AttrDict[Any]):
- """Describes a canonical track present on a release. Appears as part
- of an AlbumInfo's ``tracks`` list. Consists of these data members:
+class TrackInfo(Info):
+ """Metadata snapshot for a single track candidate.
- - ``title``: name of the track
- - ``track_id``: MusicBrainz ID; UUID fragment only
-
- Only ``title`` and ``track_id`` are required. The rest of the fields
- may be None. The indices ``index``, ``medium``, and ``medium_index``
- are all 1-based.
+ Captures identifying details and creative credits used to compare against
+ a user's item. Instances often originate within an AlbumInfo but may also
+ stand alone for singleton matching.
"""
- # TYPING: are all of these correct? I've assumed optional strings
def __init__(
self,
- title: str | None = None,
- track_id: str | None = None,
- release_track_id: str | None = None,
- artist: str | None = None,
- artist_id: str | None = None,
- artists: list[str] | None = None,
- artists_ids: list[str] | None = None,
- length: float | None = None,
+ *,
+ arranger: str | None = None,
+ bpm: str | None = None,
+ composer: str | None = None,
+ composer_sort: str | None = None,
+ disctitle: str | None = None,
index: int | None = None,
+ initial_key: str | None = None,
+ length: float | None = None,
+ lyricist: str | None = None,
+ mb_workid: str | None = None,
medium: int | None = None,
medium_index: int | None = None,
medium_total: int | None = None,
- artist_sort: str | None = None,
- artists_sort: list[str] | None = None,
- disctitle: str | None = None,
- artist_credit: str | None = None,
- artists_credit: list[str] | None = None,
- data_source: str | None = None,
- data_url: str | None = None,
- media: str | None = None,
- lyricist: str | None = None,
- composer: str | None = None,
- composer_sort: str | None = None,
- arranger: str | None = None,
+ release_track_id: str | None = None,
+ title: str | None = None,
track_alt: str | None = None,
+ track_id: str | None = None,
work: str | None = None,
- mb_workid: str | None = None,
work_disambig: str | None = None,
- bpm: str | None = None,
- initial_key: str | None = None,
- genre: str | None = None,
- album: str | None = None,
**kwargs,
- ):
- self.title = title
- self.track_id = track_id
- self.release_track_id = release_track_id
- self.artist = artist
- self.artist_id = artist_id
- self.artists = artists or []
- self.artists_ids = artists_ids or []
- self.length = length
+ ) -> None:
+ self.arranger = arranger
+ self.bpm = bpm
+ self.composer = composer
+ self.composer_sort = composer_sort
+ self.disctitle = disctitle
self.index = index
- self.media = media
+ self.initial_key = initial_key
+ self.length = length
+ self.lyricist = lyricist
+ self.mb_workid = mb_workid
self.medium = medium
self.medium_index = medium_index
self.medium_total = medium_total
- self.artist_sort = artist_sort
- self.artists_sort = artists_sort or []
- self.disctitle = disctitle
- self.artist_credit = artist_credit
- self.artists_credit = artists_credit or []
- self.data_source = data_source
- self.data_url = data_url
- self.lyricist = lyricist
- self.composer = composer
- self.composer_sort = composer_sort
- self.arranger = arranger
+ self.release_track_id = release_track_id
+ self.title = title
self.track_alt = track_alt
+ self.track_id = track_id
self.work = work
- self.mb_workid = mb_workid
self.work_disambig = work_disambig
- self.bpm = bpm
- self.initial_key = initial_key
- self.genre = genre
- self.album = album
- self.update(kwargs)
-
- def copy(self) -> TrackInfo:
- dupe = TrackInfo()
- dupe.update(self)
- return dupe
+ super().__init__(**kwargs)
# Structures that compose all the information for a candidate match.
diff --git a/beets/autotag/match.py b/beets/autotag/match.py
index e74d21755..8fec844a6 100644
--- a/beets/autotag/match.py
+++ b/beets/autotag/match.py
@@ -118,7 +118,7 @@ def match_by_id(items: Iterable[Item]) -> AlbumInfo | None:
log.debug("No album ID consensus.")
return None
# If all album IDs are equal, look up the album.
- log.debug("Searching for discovered album ID: {0}", first)
+ log.debug("Searching for discovered album ID: {}", first)
return metadata_plugins.album_for_id(first)
@@ -197,9 +197,7 @@ def _add_candidate(
checking the track count, ordering the items, checking for
duplicates, and calculating the distance.
"""
- log.debug(
- "Candidate: {0} - {1} ({2})", info.artist, info.album, info.album_id
- )
+ log.debug("Candidate: {0.artist} - {0.album} ({0.album_id})", info)
# Discard albums with zero tracks.
if not info.tracks:
@@ -215,7 +213,7 @@ def _add_candidate(
required_tags: Sequence[str] = config["match"]["required"].as_str_seq()
for req_tag in required_tags:
if getattr(info, req_tag) is None:
- log.debug("Ignored. Missing required tag: {0}", req_tag)
+ log.debug("Ignored. Missing required tag: {}", req_tag)
return
# Find mapping between the items and the track info.
@@ -229,10 +227,10 @@ def _add_candidate(
ignored_tags: Sequence[str] = config["match"]["ignored"].as_str_seq()
for penalty in ignored_tags:
if penalty in penalties:
- log.debug("Ignored. Penalty: {0}", penalty)
+ log.debug("Ignored. Penalty: {}", penalty)
return
- log.debug("Success. Distance: {0}", dist)
+ log.debug("Success. Distance: {}", dist)
results[info.album_id] = hooks.AlbumMatch(
dist, info, mapping, extra_items, extra_tracks
)
@@ -265,7 +263,7 @@ def tag_album(
likelies, consensus = get_most_common_tags(items)
cur_artist: str = likelies["artist"]
cur_album: str = likelies["album"]
- log.debug("Tagging {0} - {1}", cur_artist, cur_album)
+ log.debug("Tagging {} - {}", cur_artist, cur_album)
# The output result, keys are the MB album ID.
candidates: dict[Any, AlbumMatch] = {}
@@ -273,7 +271,7 @@ def tag_album(
# Search by explicit ID.
if search_ids:
for search_id in search_ids:
- log.debug("Searching for album ID: {0}", search_id)
+ log.debug("Searching for album ID: {}", search_id)
if info := metadata_plugins.album_for_id(search_id):
_add_candidate(items, candidates, info)
@@ -283,7 +281,7 @@ def tag_album(
if info := match_by_id(items):
_add_candidate(items, candidates, info)
rec = _recommendation(list(candidates.values()))
- log.debug("Album ID match recommendation is {0}", rec)
+ log.debug("Album ID match recommendation is {}", rec)
if candidates and not config["import"]["timid"]:
# If we have a very good MBID match, return immediately.
# Otherwise, this match will compete against metadata-based
@@ -300,7 +298,7 @@ def tag_album(
if not (search_artist and search_album):
# No explicit search terms -- use current metadata.
search_artist, search_album = cur_artist, cur_album
- log.debug("Search terms: {0} - {1}", search_artist, search_album)
+ log.debug("Search terms: {} - {}", search_artist, search_album)
# Is this album likely to be a "various artist" release?
va_likely = (
@@ -308,7 +306,7 @@ def tag_album(
or (search_artist.lower() in VA_ARTISTS)
or any(item.comp for item in items)
)
- log.debug("Album might be VA: {0}", va_likely)
+ log.debug("Album might be VA: {}", va_likely)
# Get the results from the data sources.
for matched_candidate in metadata_plugins.candidates(
@@ -316,7 +314,7 @@ def tag_album(
):
_add_candidate(items, candidates, matched_candidate)
- log.debug("Evaluating {0} candidates.", len(candidates))
+ log.debug("Evaluating {} candidates.", len(candidates))
# Sort and get the recommendation.
candidates_sorted = _sort_candidates(candidates.values())
rec = _recommendation(candidates_sorted)
@@ -345,7 +343,7 @@ def tag_item(
trackids = search_ids or [t for t in [item.mb_trackid] if t]
if trackids:
for trackid in trackids:
- log.debug("Searching for track ID: {0}", trackid)
+ log.debug("Searching for track ID: {}", trackid)
if info := metadata_plugins.track_for_id(trackid):
dist = track_distance(item, info, incl_artist=True)
candidates[info.track_id] = hooks.TrackMatch(dist, info)
@@ -369,7 +367,7 @@ def tag_item(
# Search terms.
search_artist = search_artist or item.artist
search_title = search_title or item.title
- log.debug("Item search terms: {0} - {1}", search_artist, search_title)
+ log.debug("Item search terms: {} - {}", search_artist, search_title)
# Get and evaluate candidate metadata.
for track_info in metadata_plugins.item_candidates(
@@ -379,7 +377,7 @@ def tag_item(
candidates[track_info.track_id] = hooks.TrackMatch(dist, track_info)
# Sort by distance and return with recommendation.
- log.debug("Found {0} candidates.", len(candidates))
+ log.debug("Found {} candidates.", len(candidates))
candidates_sorted = _sort_candidates(candidates.values())
rec = _recommendation(candidates_sorted)
return Proposal(candidates_sorted, rec)
diff --git a/beets/config_default.yaml b/beets/config_default.yaml
index d1329f494..c0bab8056 100644
--- a/beets/config_default.yaml
+++ b/beets/config_default.yaml
@@ -127,19 +127,12 @@ ui:
action_default: ['bold', 'cyan']
action: ['bold', 'cyan']
# New Colors
- text: ['normal']
text_faint: ['faint']
import_path: ['bold', 'blue']
import_path_items: ['bold', 'blue']
- added: ['green']
- removed: ['red']
changed: ['yellow']
- added_highlight: ['bold', 'green']
- removed_highlight: ['bold', 'red']
- changed_highlight: ['bold', 'yellow']
- text_diff_added: ['bold', 'red']
+ text_diff_added: ['bold', 'green']
text_diff_removed: ['bold', 'red']
- text_diff_changed: ['bold', 'red']
action_description: ['white']
import:
indentation:
@@ -173,7 +166,7 @@ match:
missing_tracks: medium
unmatched_tracks: medium
distance_weights:
- source: 2.0
+ data_source: 2.0
artist: 3.0
album: 3.0
media: 1.0
diff --git a/beets/dbcore/db.py b/beets/dbcore/db.py
index 81c1be4b9..afae6e906 100755
--- a/beets/dbcore/db.py
+++ b/beets/dbcore/db.py
@@ -17,15 +17,17 @@
from __future__ import annotations
import contextlib
+import functools
import os
import re
import sqlite3
+import sys
import threading
import time
from abc import ABC
from collections import defaultdict
from collections.abc import Generator, Iterable, Iterator, Mapping, Sequence
-from sqlite3 import Connection
+from sqlite3 import Connection, sqlite_version_info
from typing import TYPE_CHECKING, Any, AnyStr, Callable, Generic
from typing_extensions import TypeVar # default value support
@@ -64,6 +66,16 @@ class DBAccessError(Exception):
"""
+class DBCustomFunctionError(Exception):
+ """A sqlite function registered by beets failed."""
+
+ def __init__(self):
+ super().__init__(
+ "beets defined SQLite function failed; "
+ "see the other errors above for details"
+ )
+
+
class FormattedMapping(Mapping[str, str]):
"""A `dict`-like formatted view of a model.
@@ -390,9 +402,9 @@ class Model(ABC, Generic[D]):
return obj
def __repr__(self) -> str:
- return "{}({})".format(
- type(self).__name__,
- ", ".join(f"{k}={v!r}" for k, v in dict(self).items()),
+ return (
+ f"{type(self).__name__}"
+ f"({', '.join(f'{k}={v!r}' for k, v in dict(self).items())})"
)
def clear_dirty(self):
@@ -409,9 +421,9 @@ class Model(ABC, Generic[D]):
exception is raised otherwise.
"""
if not self._db:
- raise ValueError("{} has no database".format(type(self).__name__))
+ raise ValueError(f"{type(self).__name__} has no database")
if need_id and not self.id:
- raise ValueError("{} has no id".format(type(self).__name__))
+ raise ValueError(f"{type(self).__name__} has no id")
return self._db
@@ -588,16 +600,14 @@ class Model(ABC, Generic[D]):
for key in fields:
if key != "id" and key in self._dirty:
self._dirty.remove(key)
- assignments.append(key + "=?")
+ assignments.append(f"{key}=?")
value = self._type(key).to_sql(self[key])
subvars.append(value)
with db.transaction() as tx:
# Main table update.
if assignments:
- query = "UPDATE {} SET {} WHERE id=?".format(
- self._table, ",".join(assignments)
- )
+ query = f"UPDATE {self._table} SET {','.join(assignments)} WHERE id=?"
subvars.append(self.id)
tx.mutate(query, subvars)
@@ -607,9 +617,9 @@ class Model(ABC, Generic[D]):
self._dirty.remove(key)
value = self._type(key).to_sql(value)
tx.mutate(
- "INSERT INTO {} "
+ f"INSERT INTO {self._flex_table} "
"(entity_id, key, value) "
- "VALUES (?, ?, ?);".format(self._flex_table),
+ "VALUES (?, ?, ?);",
(self.id, key, value),
)
@@ -930,10 +940,10 @@ class Transaction:
def __exit__(
self,
- exc_type: type[Exception],
- exc_value: Exception,
- traceback: TracebackType,
- ):
+ exc_type: type[BaseException] | None,
+ exc_value: BaseException | None,
+ traceback: TracebackType | None,
+ ) -> bool | None:
"""Complete a transaction. This must be the most recently
entered but not yet exited transaction. If it is the last active
transaction, the database updates are committed.
@@ -949,6 +959,14 @@ class Transaction:
self._mutated = False
self.db._db_lock.release()
+ if (
+ isinstance(exc_value, sqlite3.OperationalError)
+ and exc_value.args[0] == "user-defined function raised exception"
+ ):
+ raise DBCustomFunctionError()
+
+ return None
+
def query(
self, statement: str, subvals: Sequence[SQLiteType] = ()
) -> list[sqlite3.Row]:
@@ -1009,6 +1027,13 @@ class Database:
"sqlite3 must be compiled with multi-threading support"
)
+ # Print tracebacks for exceptions in user-defined functions
+ # See also `self.add_functions` and `DBCustomFunctionError`.
+ #
+ # `if`: use feature detection because PyPy doesn't support this.
+ if hasattr(sqlite3, "enable_callback_tracebacks"):
+ sqlite3.enable_callback_tracebacks(True)
+
self.path = path
self.timeout = timeout
@@ -1104,9 +1129,16 @@ class Database:
return bytestring
- conn.create_function("regexp", 2, regexp)
- conn.create_function("unidecode", 1, unidecode)
- conn.create_function("bytelower", 1, bytelower)
+ create_function = conn.create_function
+ if sys.version_info >= (3, 8) and sqlite_version_info >= (3, 8, 3):
+ # Let sqlite make extra optimizations
+ create_function = functools.partial(
+ conn.create_function, deterministic=True
+ )
+
+ create_function("regexp", 2, regexp)
+ create_function("unidecode", 1, unidecode)
+ create_function("bytelower", 1, bytelower)
def _close(self):
"""Close the all connections to the underlying SQLite database
@@ -1160,7 +1192,7 @@ class Database:
"""
# Get current schema.
with self.transaction() as tx:
- rows = tx.query("PRAGMA table_info(%s)" % table)
+ rows = tx.query(f"PRAGMA table_info({table})")
current_fields = {row[1] for row in rows}
field_names = set(fields.keys())
@@ -1173,9 +1205,7 @@ class Database:
columns = []
for name, typ in fields.items():
columns.append(f"{name} {typ.sql}")
- setup_sql = "CREATE TABLE {} ({});\n".format(
- table, ", ".join(columns)
- )
+ setup_sql = f"CREATE TABLE {table} ({', '.join(columns)});\n"
else:
# Table exists does not match the field set.
@@ -1183,8 +1213,8 @@ class Database:
for name, typ in fields.items():
if name in current_fields:
continue
- setup_sql += "ALTER TABLE {} ADD COLUMN {} {};\n".format(
- table, name, typ.sql
+ setup_sql += (
+ f"ALTER TABLE {table} ADD COLUMN {name} {typ.sql};\n"
)
with self.transaction() as tx:
@@ -1195,18 +1225,16 @@ class Database:
for the given entity (if they don't exist).
"""
with self.transaction() as tx:
- tx.script(
- """
- CREATE TABLE IF NOT EXISTS {0} (
+ tx.script(f"""
+ CREATE TABLE IF NOT EXISTS {flex_table} (
id INTEGER PRIMARY KEY,
entity_id INTEGER,
key TEXT,
value TEXT,
UNIQUE(entity_id, key) ON CONFLICT REPLACE);
- CREATE INDEX IF NOT EXISTS {0}_by_entity
- ON {0} (entity_id);
- """.format(flex_table)
- )
+ CREATE INDEX IF NOT EXISTS {flex_table}_by_entity
+ ON {flex_table} (entity_id);
+ """)
# Querying.
diff --git a/beets/dbcore/query.py b/beets/dbcore/query.py
index 49d7f6428..dfeb42707 100644
--- a/beets/dbcore/query.py
+++ b/beets/dbcore/query.py
@@ -190,7 +190,7 @@ class MatchQuery(FieldQuery[AnySQLiteType]):
"""A query that looks for exact matches in an Model field."""
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
- return self.field + " = ?", [self.pattern]
+ return f"{self.field} = ?", [self.pattern]
@classmethod
def value_match(cls, pattern: AnySQLiteType, value: Any) -> bool:
@@ -204,7 +204,7 @@ class NoneQuery(FieldQuery[None]):
super().__init__(field, None, fast)
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
- return self.field + " IS NULL", ()
+ return f"{self.field} IS NULL", ()
def match(self, obj: Model) -> bool:
return obj.get(self.field_name) is None
@@ -246,7 +246,7 @@ class StringQuery(StringFieldQuery[str]):
.replace("%", "\\%")
.replace("_", "\\_")
)
- clause = self.field + " like ? escape '\\'"
+ clause = f"{self.field} like ? escape '\\'"
subvals = [search]
return clause, subvals
@@ -264,8 +264,8 @@ class SubstringQuery(StringFieldQuery[str]):
.replace("%", "\\%")
.replace("_", "\\_")
)
- search = "%" + pattern + "%"
- clause = self.field + " like ? escape '\\'"
+ search = f"%{pattern}%"
+ clause = f"{self.field} like ? escape '\\'"
subvals = [search]
return clause, subvals
@@ -471,11 +471,11 @@ class NumericQuery(FieldQuery[str]):
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
if self.point is not None:
- return self.field + "=?", (self.point,)
+ return f"{self.field}=?", (self.point,)
else:
if self.rangemin is not None and self.rangemax is not None:
return (
- "{0} >= ? AND {0} <= ?".format(self.field),
+ f"{self.field} >= ? AND {self.field} <= ?",
(self.rangemin, self.rangemax),
)
elif self.rangemin is not None:
@@ -549,9 +549,9 @@ class CollectionQuery(Query):
if not subq_clause:
# Fall back to slow query.
return None, ()
- clause_parts.append("(" + subq_clause + ")")
+ clause_parts.append(f"({subq_clause})")
subvals += subq_subvals
- clause = (" " + joiner + " ").join(clause_parts)
+ clause = f" {joiner} ".join(clause_parts)
return clause, subvals
def __repr__(self) -> str:
@@ -690,9 +690,7 @@ class Period:
("%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"), # second
)
relative_units = {"y": 365, "m": 30, "w": 7, "d": 1}
- relative_re = (
- "(?P[+|-]?)(?P[0-9]+)" + "(?P[y|m|w|d])"
- )
+ relative_re = "(?P[+|-]?)(?P[0-9]+)(?P[y|m|w|d])"
def __init__(self, date: datetime, precision: str):
"""Create a period with the given date (a `datetime` object) and
@@ -800,9 +798,7 @@ class DateInterval:
def __init__(self, start: datetime | None, end: datetime | None):
if start is not None and end is not None and not start < end:
- raise ValueError(
- "start date {} is not before end date {}".format(start, end)
- )
+ raise ValueError(f"start date {start} is not before end date {end}")
self.start = start
self.end = end
@@ -850,8 +846,6 @@ class DateQuery(FieldQuery[str]):
date = datetime.fromtimestamp(timestamp)
return self.interval.contains(date)
- _clause_tmpl = "{0} {1} ?"
-
def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
clause_parts = []
subvals = []
@@ -859,11 +853,11 @@ class DateQuery(FieldQuery[str]):
# Convert the `datetime` objects to an integer number of seconds since
# the (local) Unix epoch using `datetime.timestamp()`.
if self.interval.start:
- clause_parts.append(self._clause_tmpl.format(self.field, ">="))
+ clause_parts.append(f"{self.field} >= ?")
subvals.append(int(self.interval.start.timestamp()))
if self.interval.end:
- clause_parts.append(self._clause_tmpl.format(self.field, "<"))
+ clause_parts.append(f"{self.field} < ?")
subvals.append(int(self.interval.end.timestamp()))
if clause_parts:
@@ -1074,9 +1068,9 @@ class FixedFieldSort(FieldSort):
if self.case_insensitive:
field = (
"(CASE "
- "WHEN TYPEOF({0})='text' THEN LOWER({0}) "
- "WHEN TYPEOF({0})='blob' THEN LOWER({0}) "
- "ELSE {0} END)".format(self.field)
+ f"WHEN TYPEOF({self.field})='text' THEN LOWER({self.field}) "
+ f"WHEN TYPEOF({self.field})='blob' THEN LOWER({self.field}) "
+ f"ELSE {self.field} END)"
)
else:
field = self.field
diff --git a/beets/dbcore/types.py b/beets/dbcore/types.py
index 1b8434a0b..3b4badd33 100644
--- a/beets/dbcore/types.py
+++ b/beets/dbcore/types.py
@@ -194,7 +194,7 @@ class BasePaddedInt(BaseInteger[N]):
self.digits = digits
def format(self, value: int | N) -> str:
- return "{0:0{1}d}".format(value or 0, self.digits)
+ return f"{value or 0:0{self.digits}d}"
class PaddedInt(BasePaddedInt[int]):
@@ -219,7 +219,7 @@ class ScaledInt(Integer):
self.suffix = suffix
def format(self, value: int) -> str:
- return "{}{}".format((value or 0) // self.unit, self.suffix)
+ return f"{(value or 0) // self.unit}{self.suffix}"
class Id(NullInteger):
@@ -249,7 +249,7 @@ class BaseFloat(Type[float, N]):
self.digits = digits
def format(self, value: float | N) -> str:
- return "{0:.{1}f}".format(value or 0, self.digits)
+ return f"{value or 0:.{self.digits}f}"
class Float(BaseFloat[float]):
diff --git a/beets/importer/session.py b/beets/importer/session.py
index e45644fa3..46277837e 100644
--- a/beets/importer/session.py
+++ b/beets/importer/session.py
@@ -150,7 +150,7 @@ class ImportSession:
"""Log a message about a given album to the importer log. The status
should reflect the reason the album couldn't be tagged.
"""
- self.logger.info("{0} {1}", status, displayable_path(paths))
+ self.logger.info("{} {}", status, displayable_path(paths))
def log_choice(self, task: ImportTask, duplicate=False):
"""Logs the task's current choice if it should be logged. If
@@ -187,7 +187,7 @@ class ImportSession:
def run(self):
"""Run the import task."""
- self.logger.info("import started {0}", time.asctime())
+ self.logger.info("import started {}", time.asctime())
self.set_config(config["import"])
# Set up the pipeline.
@@ -297,7 +297,7 @@ class ImportSession:
# Either accept immediately or prompt for input to decide.
if self.want_resume is True or self.should_resume(toppath):
log.warning(
- "Resuming interrupted import of {0}",
+ "Resuming interrupted import of {}",
util.displayable_path(toppath),
)
self._is_resuming[toppath] = True
diff --git a/beets/importer/stages.py b/beets/importer/stages.py
index 24ff815f3..d99b742a2 100644
--- a/beets/importer/stages.py
+++ b/beets/importer/stages.py
@@ -58,11 +58,11 @@ def read_tasks(session: ImportSession):
skipped += task_factory.skipped
if not task_factory.imported:
- log.warning("No files imported from {0}", displayable_path(toppath))
+ log.warning("No files imported from {}", displayable_path(toppath))
# Show skipped directories (due to incremental/resume).
if skipped:
- log.info("Skipped {0} paths.", skipped)
+ log.info("Skipped {} paths.", skipped)
def query_tasks(session: ImportSession):
@@ -82,10 +82,7 @@ def query_tasks(session: ImportSession):
# Search for albums.
for album in session.lib.albums(session.query):
log.debug(
- "yielding album {0}: {1} - {2}",
- album.id,
- album.albumartist,
- album.album,
+ "yielding album {0.id}: {0.albumartist} - {0.album}", album
)
items = list(album.items())
_freshen_items(items)
@@ -140,7 +137,7 @@ def lookup_candidates(session: ImportSession, task: ImportTask):
return
plugins.send("import_task_start", session=session, task=task)
- log.debug("Looking up: {0}", displayable_path(task.paths))
+ log.debug("Looking up: {}", displayable_path(task.paths))
# Restrict the initial lookup to IDs specified by the user via the -m
# option. Currently all the IDs are passed onto the tasks directly.
@@ -259,11 +256,11 @@ def plugin_stage(
def log_files(session: ImportSession, task: ImportTask):
"""A coroutine (pipeline stage) to log each file to be imported."""
if isinstance(task, SingletonImportTask):
- log.info("Singleton: {0}", displayable_path(task.item["path"]))
+ log.info("Singleton: {}", displayable_path(task.item["path"]))
elif task.items:
- log.info("Album: {0}", displayable_path(task.paths[0]))
+ log.info("Album: {}", displayable_path(task.paths[0]))
for item in task.items:
- log.info(" {0}", displayable_path(item["path"]))
+ log.info(" {}", displayable_path(item["path"]))
# --------------------------------- Consumer --------------------------------- #
@@ -341,9 +338,7 @@ def _resolve_duplicates(session: ImportSession, task: ImportTask):
if task.choice_flag in (Action.ASIS, Action.APPLY, Action.RETAG):
found_duplicates = task.find_duplicates(session.lib)
if found_duplicates:
- log.debug(
- "found duplicates: {}".format([o.id for o in found_duplicates])
- )
+ log.debug("found duplicates: {}", [o.id for o in found_duplicates])
# Get the default action to follow from config.
duplicate_action = config["import"]["duplicate_action"].as_choice(
@@ -355,7 +350,7 @@ def _resolve_duplicates(session: ImportSession, task: ImportTask):
"ask": "a",
}
)
- log.debug("default action for duplicates: {0}", duplicate_action)
+ log.debug("default action for duplicates: {}", duplicate_action)
if duplicate_action == "s":
# Skip new.
diff --git a/beets/importer/state.py b/beets/importer/state.py
index fccb7c282..fde26c606 100644
--- a/beets/importer/state.py
+++ b/beets/importer/state.py
@@ -87,7 +87,7 @@ class ImportState:
# unpickling, including ImportError. We use a catch-all
# exception to avoid enumerating them all (the docs don't even have a
# full list!).
- log.debug("state file could not be read: {0}", exc)
+ log.debug("state file could not be read: {}", exc)
def _save(self):
try:
@@ -100,7 +100,7 @@ class ImportState:
f,
)
except OSError as exc:
- log.error("state file could not be written: {0}", exc)
+ log.error("state file could not be written: {}", exc)
# -------------------------------- Tagprogress ------------------------------- #
diff --git a/beets/importer/tasks.py b/beets/importer/tasks.py
index abe2ca8a9..710f4da50 100644
--- a/beets/importer/tasks.py
+++ b/beets/importer/tasks.py
@@ -51,15 +51,16 @@ SINGLE_ARTIST_THRESH = 0.25
# def extend_reimport_fresh_fields_item():
# importer.REIMPORT_FRESH_FIELDS_ITEM.extend(['tidal_track_popularity']
# )
-REIMPORT_FRESH_FIELDS_ALBUM = [
+REIMPORT_FRESH_FIELDS_ITEM = [
"data_source",
"bandcamp_album_id",
"spotify_album_id",
"deezer_album_id",
"beatport_album_id",
"tidal_album_id",
+ "data_url",
]
-REIMPORT_FRESH_FIELDS_ITEM = list(REIMPORT_FRESH_FIELDS_ALBUM)
+REIMPORT_FRESH_FIELDS_ALBUM = [*REIMPORT_FRESH_FIELDS_ITEM, "media"]
# Global logger.
log = logging.getLogger("beets")
@@ -267,13 +268,11 @@ class ImportTask(BaseImportTask):
def remove_duplicates(self, lib: library.Library):
duplicate_items = self.duplicate_items(lib)
- log.debug("removing {0} old duplicated items", len(duplicate_items))
+ log.debug("removing {} old duplicated items", len(duplicate_items))
for item in duplicate_items:
item.remove()
if lib.directory in util.ancestry(item.path):
- log.debug(
- "deleting duplicate {0}", util.displayable_path(item.path)
- )
+ log.debug("deleting duplicate {.filepath}", item)
util.remove(item.path)
util.prune_dirs(os.path.dirname(item.path), lib.directory)
@@ -285,10 +284,10 @@ class ImportTask(BaseImportTask):
for field, view in config["import"]["set_fields"].items():
value = str(view.get())
log.debug(
- "Set field {1}={2} for {0}",
- util.displayable_path(self.paths),
+ "Set field {}={} for {}",
field,
value,
+ util.displayable_path(self.paths),
)
self.album.set_parse(field, format(self.album, value))
for item in items:
@@ -554,12 +553,11 @@ class ImportTask(BaseImportTask):
]
if overwritten_fields:
log.debug(
- "Reimported {} {}. Not preserving flexible attributes {}. "
- "Path: {}",
+ "Reimported {0} {1.id}. Not preserving flexible attributes {2}. "
+ "Path: {1.filepath}",
noun,
- new_obj.id,
+ new_obj,
overwritten_fields,
- util.displayable_path(new_obj.path),
)
for key in overwritten_fields:
del existing_fields[key]
@@ -578,17 +576,15 @@ class ImportTask(BaseImportTask):
self.album.artpath = replaced_album.artpath
self.album.store()
log.debug(
- "Reimported album {}. Preserving attribute ['added']. "
- "Path: {}",
- self.album.id,
- util.displayable_path(self.album.path),
+ "Reimported album {0.album.id}. Preserving attribute ['added']. "
+ "Path: {0.album.filepath}",
+ self,
)
log.debug(
- "Reimported album {}. Preserving flexible attributes {}. "
- "Path: {}",
- self.album.id,
+ "Reimported album {0.album.id}. Preserving flexible"
+ " attributes {1}. Path: {0.album.filepath}",
+ self,
list(album_fields.keys()),
- util.displayable_path(self.album.path),
)
for item in self.imported_items():
@@ -597,21 +593,19 @@ class ImportTask(BaseImportTask):
if dup_item.added and dup_item.added != item.added:
item.added = dup_item.added
log.debug(
- "Reimported item {}. Preserving attribute ['added']. "
- "Path: {}",
- item.id,
- util.displayable_path(item.path),
+ "Reimported item {0.id}. Preserving attribute ['added']. "
+ "Path: {0.filepath}",
+ item,
)
item_fields = _reduce_and_log(
item, dup_item._values_flex, REIMPORT_FRESH_FIELDS_ITEM
)
item.update(item_fields)
log.debug(
- "Reimported item {}. Preserving flexible attributes {}. "
- "Path: {}",
- item.id,
+ "Reimported item {0.id}. Preserving flexible attributes {1}. "
+ "Path: {0.filepath}",
+ item,
list(item_fields.keys()),
- util.displayable_path(item.path),
)
item.store()
@@ -621,14 +615,10 @@ class ImportTask(BaseImportTask):
"""
for item in self.imported_items():
for dup_item in self.replaced_items[item]:
- log.debug(
- "Replacing item {0}: {1}",
- dup_item.id,
- util.displayable_path(item.path),
- )
+ log.debug("Replacing item {.id}: {.filepath}", dup_item, item)
dup_item.remove()
log.debug(
- "{0} of {1} items replaced",
+ "{} of {} items replaced",
sum(bool(v) for v in self.replaced_items.values()),
len(self.imported_items()),
)
@@ -747,10 +737,10 @@ class SingletonImportTask(ImportTask):
for field, view in config["import"]["set_fields"].items():
value = str(view.get())
log.debug(
- "Set field {1}={2} for {0}",
- util.displayable_path(self.paths),
+ "Set field {}={} for {}",
field,
value,
+ util.displayable_path(self.paths),
)
self.item.set_parse(field, format(self.item, value))
self.item.store()
@@ -870,7 +860,7 @@ class ArchiveImportTask(SentinelImportTask):
"""Removes the temporary directory the archive was extracted to."""
if self.extracted and self.toppath:
log.debug(
- "Removing extracted directory: {0}",
+ "Removing extracted directory: {}",
util.displayable_path(self.toppath),
)
shutil.rmtree(util.syspath(self.toppath))
@@ -1002,7 +992,7 @@ class ImportTaskFactory:
"""Return a `SingletonImportTask` for the music file."""
if self.session.already_imported(self.toppath, [path]):
log.debug(
- "Skipping previously-imported path: {0}",
+ "Skipping previously-imported path: {}",
util.displayable_path(path),
)
self.skipped += 1
@@ -1026,7 +1016,7 @@ class ImportTaskFactory:
if self.session.already_imported(self.toppath, dirs):
log.debug(
- "Skipping previously-imported path: {0}",
+ "Skipping previously-imported path: {}",
util.displayable_path(dirs),
)
self.skipped += 1
@@ -1063,19 +1053,17 @@ class ImportTaskFactory:
)
return
- log.debug(
- "Extracting archive: {0}", util.displayable_path(self.toppath)
- )
+ log.debug("Extracting archive: {}", util.displayable_path(self.toppath))
archive_task = ArchiveImportTask(self.toppath)
try:
archive_task.extract()
except Exception as exc:
- log.error("extraction failed: {0}", exc)
+ log.error("extraction failed: {}", exc)
return
# Now read albums from the extracted directory.
self.toppath = archive_task.toppath
- log.debug("Archive extracted to: {0}", self.toppath)
+ log.debug("Archive extracted to: {.toppath}", self)
return archive_task
def read_item(self, path: util.PathBytes):
@@ -1091,10 +1079,10 @@ class ImportTaskFactory:
# Silently ignore non-music files.
pass
elif isinstance(exc.reason, mediafile.UnreadableFileError):
- log.warning("unreadable file: {0}", util.displayable_path(path))
+ log.warning("unreadable file: {}", util.displayable_path(path))
else:
log.error(
- "error reading {0}: {1}", util.displayable_path(path), exc
+ "error reading {}: {}", util.displayable_path(path), exc
)
diff --git a/beets/library/exceptions.py b/beets/library/exceptions.py
index 7f117a2fe..0dc874c2a 100644
--- a/beets/library/exceptions.py
+++ b/beets/library/exceptions.py
@@ -28,11 +28,11 @@ class ReadError(FileOperationError):
"""An error while reading a file (i.e. in `Item.read`)."""
def __str__(self):
- return "error reading " + str(super())
+ return f"error reading {super()}"
class WriteError(FileOperationError):
"""An error while writing a file (i.e. in `Item.write`)."""
def __str__(self):
- return "error writing " + str(super())
+ return f"error writing {super()}"
diff --git a/beets/library/models.py b/beets/library/models.py
index 7501513a1..cbee2a411 100644
--- a/beets/library/models.py
+++ b/beets/library/models.py
@@ -425,7 +425,7 @@ class Album(LibModel):
new_art = util.unique_path(new_art)
log.debug(
- "moving album art {0} to {1}",
+ "moving album art {} to {}",
util.displayable_path(old_art),
util.displayable_path(new_art),
)
@@ -482,7 +482,7 @@ class Album(LibModel):
"""
item = self.items().get()
if not item:
- raise ValueError("empty album for album id %d" % self.id)
+ raise ValueError(f"empty album for album id {self.id}")
return os.path.dirname(item.path)
def _albumtotal(self):
@@ -844,12 +844,9 @@ class Item(LibModel):
# This must not use `with_album=True`, because that might access
# the database. When debugging, that is not guaranteed to succeed, and
# can even deadlock due to the database lock.
- return "{}({})".format(
- type(self).__name__,
- ", ".join(
- "{}={!r}".format(k, self[k])
- for k in self.keys(with_album=False)
- ),
+ return (
+ f"{type(self).__name__}"
+ f"({', '.join(f'{k}={self[k]!r}' for k in self.keys(with_album=False))})"
)
def keys(self, computed=False, with_album=True):
@@ -995,7 +992,7 @@ class Item(LibModel):
self.write(*args, **kwargs)
return True
except FileOperationError as exc:
- log.error("{0}", exc)
+ log.error("{}", exc)
return False
def try_sync(self, write, move, with_album=True):
@@ -1015,10 +1012,7 @@ class Item(LibModel):
if move:
# Check whether this file is inside the library directory.
if self._db and self._db.directory in util.ancestry(self.path):
- log.debug(
- "moving {0} to synchronize path",
- util.displayable_path(self.path),
- )
+ log.debug("moving {.filepath} to synchronize path", self)
self.move(with_album=with_album)
self.store()
@@ -1090,7 +1084,7 @@ class Item(LibModel):
try:
return os.path.getsize(syspath(self.path))
except (OSError, Exception) as exc:
- log.warning("could not get filesize: {0}", exc)
+ log.warning("could not get filesize: {}", exc)
return 0
# Model methods.
diff --git a/beets/logging.py b/beets/logging.py
index fd8b1962f..3ed5e5a84 100644
--- a/beets/logging.py
+++ b/beets/logging.py
@@ -20,6 +20,8 @@ use {}-style formatting and can interpolate keywords arguments to the logging
calls (`debug`, `info`, etc).
"""
+from __future__ import annotations
+
import threading
from copy import copy
from logging import (
@@ -32,8 +34,10 @@ from logging import (
Handler,
Logger,
NullHandler,
+ RootLogger,
StreamHandler,
)
+from typing import TYPE_CHECKING, Any, Mapping, TypeVar, Union, overload
__all__ = [
"DEBUG",
@@ -49,8 +53,20 @@ __all__ = [
"getLogger",
]
+if TYPE_CHECKING:
+ T = TypeVar("T")
+ from types import TracebackType
-def logsafe(val):
+ # see https://github.com/python/typeshed/blob/main/stdlib/logging/__init__.pyi
+ _SysExcInfoType = Union[
+ tuple[type[BaseException], BaseException, Union[TracebackType, None]],
+ tuple[None, None, None],
+ ]
+ _ExcInfoType = Union[None, bool, _SysExcInfoType, BaseException]
+ _ArgsType = Union[tuple[object, ...], Mapping[str, object]]
+
+
+def _logsafe(val: T) -> str | T:
"""Coerce `bytes` to `str` to avoid crashes solely due to logging.
This is particularly relevant for bytestring paths. Much of our code
@@ -83,40 +99,45 @@ class StrFormatLogger(Logger):
"""
class _LogMessage:
- def __init__(self, msg, args, kwargs):
+ def __init__(
+ self,
+ msg: str,
+ args: _ArgsType,
+ kwargs: dict[str, Any],
+ ):
self.msg = msg
self.args = args
self.kwargs = kwargs
def __str__(self):
- args = [logsafe(a) for a in self.args]
- kwargs = {k: logsafe(v) for (k, v) in self.kwargs.items()}
+ args = [_logsafe(a) for a in self.args]
+ kwargs = {k: _logsafe(v) for (k, v) in self.kwargs.items()}
return self.msg.format(*args, **kwargs)
def _log(
self,
- level,
- msg,
- args,
- exc_info=None,
- extra=None,
- stack_info=False,
+ level: int,
+ msg: object,
+ args: _ArgsType,
+ exc_info: _ExcInfoType = None,
+ extra: Mapping[str, Any] | None = None,
+ stack_info: bool = False,
+ stacklevel: int = 1,
**kwargs,
):
"""Log msg.format(*args, **kwargs)"""
- m = self._LogMessage(msg, args, kwargs)
- stacklevel = kwargs.pop("stacklevel", 1)
- stacklevel = {"stacklevel": stacklevel}
+ if isinstance(msg, str):
+ msg = self._LogMessage(msg, args, kwargs)
return super()._log(
level,
- m,
+ msg,
(),
exc_info=exc_info,
extra=extra,
stack_info=stack_info,
- **stacklevel,
+ stacklevel=stacklevel,
)
@@ -156,9 +177,12 @@ my_manager = copy(Logger.manager)
my_manager.loggerClass = BeetsLogger
-# Override the `getLogger` to use our machinery.
-def getLogger(name=None): # noqa
+@overload
+def getLogger(name: str) -> BeetsLogger: ...
+@overload
+def getLogger(name: None = ...) -> RootLogger: ...
+def getLogger(name=None) -> BeetsLogger | RootLogger: # noqa: N802
if name:
- return my_manager.getLogger(name)
+ return my_manager.getLogger(name) # type: ignore[return-value]
else:
return Logger.root
diff --git a/beets/metadata_plugins.py b/beets/metadata_plugins.py
index 9d69633d6..b865167e4 100644
--- a/beets/metadata_plugins.py
+++ b/beets/metadata_plugins.py
@@ -8,11 +8,12 @@ implemented as plugins.
from __future__ import annotations
import abc
-import inspect
import re
-import warnings
+from functools import cache, cached_property
from typing import TYPE_CHECKING, Generic, Literal, Sequence, TypedDict, TypeVar
+import unidecode
+from confuse import NotFoundError
from typing_extensions import NotRequired
from beets.util import cached_classproperty
@@ -23,36 +24,14 @@ from .plugins import BeetsPlugin, find_plugins, notify_info_yielded, send
if TYPE_CHECKING:
from collections.abc import Iterable
- from confuse import ConfigView
-
- from .autotag import Distance
from .autotag.hooks import AlbumInfo, Item, TrackInfo
+@cache
def find_metadata_source_plugins() -> list[MetadataSourcePlugin]:
- """Returns a list of MetadataSourcePlugin subclass instances
-
- Resolved from all currently loaded beets plugins.
- """
-
- all_plugins = find_plugins()
- metadata_plugins: list[MetadataSourcePlugin | BeetsPlugin] = []
- for plugin in all_plugins:
- if isinstance(plugin, MetadataSourcePlugin):
- metadata_plugins.append(plugin)
- elif hasattr(plugin, "data_source"):
- # TODO: Remove this in the future major release, v3.0.0
- warnings.warn(
- f"{plugin.__class__.__name__} is used as a legacy metadata source. "
- "It should extend MetadataSourcePlugin instead of BeetsPlugin. "
- "Support for this will be removed in the v3.0.0 release!",
- DeprecationWarning,
- stacklevel=2,
- )
- metadata_plugins.append(plugin)
-
- # typeignore: BeetsPlugin is not a MetadataSourcePlugin (legacy support)
- return metadata_plugins # type: ignore[return-value]
+ """Return a list of all loaded metadata source plugins."""
+ # TODO: Make this an isinstance(MetadataSourcePlugin, ...) check in v3.0.0
+ return [p for p in find_plugins() if hasattr(p, "data_source")] # type: ignore[misc]
@notify_info_yielded("albuminfo_received")
@@ -95,46 +74,17 @@ def track_for_id(_id: str) -> TrackInfo | None:
return None
-def track_distance(item: Item, info: TrackInfo) -> Distance:
- """Returns the track distance for an item and trackinfo.
-
- Returns a Distance object is populated by all metadata source plugins
- that implement the :py:meth:`MetadataSourcePlugin.track_distance` method.
- """
- from beets.autotag.distance import Distance
-
- dist = Distance()
- for plugin in find_metadata_source_plugins():
- dist.update(plugin.track_distance(item, info))
- return dist
-
-
-def album_distance(
- items: Sequence[Item],
- album_info: AlbumInfo,
- mapping: dict[Item, TrackInfo],
-) -> Distance:
- """Returns the album distance calculated by plugins."""
- from beets.autotag.distance import Distance
-
- dist = Distance()
- for plugin in find_metadata_source_plugins():
- dist.update(plugin.album_distance(items, album_info, mapping))
- return dist
-
-
-def _get_distance(
- config: ConfigView, data_source: str, info: AlbumInfo | TrackInfo
-) -> Distance:
- """Returns the ``data_source`` weight and the maximum source weight
- for albums or individual tracks.
- """
- from beets.autotag.distance import Distance
-
- dist = Distance()
- if info.data_source == data_source:
- dist.add("source", config["source_weight"].as_number())
- return dist
+@cache
+def get_penalty(data_source: str | None) -> float:
+ """Get the penalty value for the given data source."""
+ return next(
+ (
+ p.data_source_mismatch_penalty
+ for p in find_metadata_source_plugins()
+ if p.data_source == data_source
+ ),
+ MetadataSourcePlugin.DEFAULT_DATA_SOURCE_MISMATCH_PENALTY,
+ )
class MetadataSourcePlugin(BeetsPlugin, metaclass=abc.ABCMeta):
@@ -145,9 +95,31 @@ class MetadataSourcePlugin(BeetsPlugin, metaclass=abc.ABCMeta):
and tracks, and to retrieve album and track information by ID.
"""
+ DEFAULT_DATA_SOURCE_MISMATCH_PENALTY = 0.5
+
+ @cached_classproperty
+ def data_source(cls) -> str:
+ """The data source name for this plugin.
+
+ This is inferred from the plugin name.
+ """
+ return cls.__name__.replace("Plugin", "") # type: ignore[attr-defined]
+
+ @cached_property
+ def data_source_mismatch_penalty(self) -> float:
+ try:
+ return self.config["source_weight"].as_number()
+ except NotFoundError:
+ return self.config["data_source_mismatch_penalty"].as_number()
+
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
- self.config.add({"source_weight": 0.5})
+ self.config.add(
+ {
+ "search_limit": 5,
+ "data_source_mismatch_penalty": self.DEFAULT_DATA_SOURCE_MISMATCH_PENALTY, # noqa: E501
+ }
+ )
@abc.abstractmethod
def album_for_id(self, album_id: str) -> AlbumInfo | None:
@@ -219,35 +191,6 @@ class MetadataSourcePlugin(BeetsPlugin, metaclass=abc.ABCMeta):
return (self.track_for_id(id) for id in ids)
- def album_distance(
- self,
- items: Sequence[Item],
- album_info: AlbumInfo,
- mapping: dict[Item, TrackInfo],
- ) -> Distance:
- """Calculate the distance for an album based on its items and album info."""
- return _get_distance(
- data_source=self.data_source, info=album_info, config=self.config
- )
-
- def track_distance(
- self,
- item: Item,
- info: TrackInfo,
- ) -> Distance:
- """Calculate the distance for a track based on its item and track info."""
- return _get_distance(
- data_source=self.data_source, info=info, config=self.config
- )
-
- @cached_classproperty
- def data_source(cls) -> str:
- """The data source name for this plugin.
-
- This is inferred from the plugin name.
- """
- return cls.__name__.replace("Plugin", "") # type: ignore[attr-defined]
-
def _extract_id(self, url: str) -> str | None:
"""Extract an ID from a URL for this metadata source plugin.
@@ -266,10 +209,9 @@ class MetadataSourcePlugin(BeetsPlugin, metaclass=abc.ABCMeta):
"""Returns an artist string (all artists) and an artist_id (the main
artist) for a list of artist object dicts.
- For each artist, this function moves articles (such as 'a', 'an',
- and 'the') to the front and strips trailing disambiguation numbers. It
- returns a tuple containing the comma-separated string of all
- normalized artists and the ``id`` of the main/first artist.
+ For each artist, this function moves articles (such as 'a', 'an', and 'the')
+ to the front. It returns a tuple containing the comma-separated string
+ of all normalized artists and the ``id`` of the main/first artist.
Alternatively a keyword can be used to combine artists together into a
single string by passing the join_key argument.
@@ -293,8 +235,6 @@ class MetadataSourcePlugin(BeetsPlugin, metaclass=abc.ABCMeta):
if not artist_id:
artist_id = artist[id_key]
name = artist[name_key]
- # Strip disambiguation number.
- name = re.sub(r" \(\d+\)$", "", name)
# Move articles to the front.
name = re.sub(r"^(.*?), (a|an|the)$", r"\2 \1", name, flags=re.I)
# Use a join keyword if requested and available.
@@ -334,18 +274,26 @@ class SearchApiMetadataSourcePlugin(
of identifiers for the requested type (album or track).
"""
+ def __init__(self, *args, **kwargs) -> None:
+ super().__init__(*args, **kwargs)
+ self.config.add(
+ {
+ "search_query_ascii": False,
+ }
+ )
+
@abc.abstractmethod
def _search_api(
self,
query_type: Literal["album", "track"],
filters: SearchFilter,
- keywords: str = "",
+ query_string: str = "",
) -> Sequence[R]:
"""Perform a search on the API.
:param query_type: The type of query to perform.
:param filters: A dictionary of filters to apply to the search.
- :param keywords: Additional keywords to include in the search.
+ :param query_string: Additional query to include in the search.
Should return a list of identifiers for the requested type (album or track).
"""
@@ -358,7 +306,9 @@ class SearchApiMetadataSourcePlugin(
album: str,
va_likely: bool,
) -> Iterable[AlbumInfo]:
- query_filters: SearchFilter = {"album": album}
+ query_filters: SearchFilter = {}
+ if album:
+ query_filters["album"] = album
if not va_likely:
query_filters["artist"] = artist
@@ -373,7 +323,9 @@ class SearchApiMetadataSourcePlugin(
def item_candidates(
self, item: Item, artist: str, title: str
) -> Iterable[TrackInfo]:
- results = self._search_api("track", {"artist": artist}, keywords=title)
+ results = self._search_api(
+ "track", {"artist": artist}, query_string=title
+ )
if not results:
return []
@@ -382,12 +334,26 @@ class SearchApiMetadataSourcePlugin(
self.tracks_for_ids([result["id"] for result in results if result]),
)
+ def _construct_search_query(
+ self, filters: SearchFilter, query_string: str
+ ) -> str:
+ """Construct a query string with the specified filters and keywords to
+ be provided to the spotify (or similar) search API.
-# Dynamically copy methods to BeetsPlugin for legacy support
-# TODO: Remove this in the future major release, v3.0.0
+ The returned format was initially designed for spotify's search API but
+ we found it is also useful with other APIs that support similar query structures.
+ see `spotify <https://developer.spotify.com/documentation/web-api/reference/search>`_
+ and `deezer <https://developers.deezer.com/api/search>`_.
-for name, method in inspect.getmembers(
- MetadataSourcePlugin, predicate=inspect.isfunction
-):
- if not hasattr(BeetsPlugin, name):
- setattr(BeetsPlugin, name, method)
+ :param filters: Field filters to apply.
+ :param query_string: Query keywords to use.
+ :return: Query string to be provided to the search API.
+ """
+
+ components = [query_string, *(f"{k}:'{v}'" for k, v in filters.items())]
+ query = " ".join(filter(None, components))
+
+ if self.config["search_query_ascii"].get():
+ query = unidecode.unidecode(query)
+
+ return query
diff --git a/beets/plugins.py b/beets/plugins.py
index c5c5b2c53..e10dcf80c 100644
--- a/beets/plugins.py
+++ b/beets/plugins.py
@@ -20,8 +20,10 @@ import abc
import inspect
import re
import sys
+import warnings
from collections import defaultdict
-from functools import wraps
+from functools import cached_property, wraps
+from importlib import import_module
from pathlib import Path
from types import GenericAlias
from typing import TYPE_CHECKING, Any, ClassVar, Literal, TypeVar
@@ -130,9 +132,9 @@ class PluginLogFilter(logging.Filter):
def filter(self, record):
if hasattr(record.msg, "msg") and isinstance(record.msg.msg, str):
# A _LogMessage from our hacked-up Logging replacement.
- record.msg.msg = self.prefix + record.msg.msg
+ record.msg.msg = f"{self.prefix}{record.msg.msg}"
elif isinstance(record.msg, str):
- record.msg = self.prefix + record.msg
+ record.msg = f"{self.prefix}{record.msg}"
return True
@@ -158,6 +160,59 @@ class BeetsPlugin(metaclass=abc.ABCMeta):
early_import_stages: list[ImportStageFunc]
import_stages: list[ImportStageFunc]
+ def __init_subclass__(cls) -> None:
+ """Enable legacy metadata-source plugins to work with the new interface.
+
+ When a plugin subclass of BeetsPlugin defines a `data_source` attribute
+ but does not inherit from MetadataSourcePlugin, this hook:
+
+ 1. Skips abstract classes.
+ 2. Warns that the class should extend MetadataSourcePlugin (deprecation).
+ 3. Copies any non-abstract methods from MetadataSourcePlugin onto the
+ subclass to provide the full plugin API.
+
+ This compatibility layer will be removed in the v3.0.0 release.
+ """
+ # TODO: Remove in v3.0.0
+ if inspect.isabstract(cls):
+ return
+
+ from beets.metadata_plugins import MetadataSourcePlugin
+
+ if issubclass(cls, MetadataSourcePlugin) or not hasattr(
+ cls, "data_source"
+ ):
+ return
+
+ warnings.warn(
+ f"{cls.__name__} is used as a legacy metadata source. "
+ "It should extend MetadataSourcePlugin instead of BeetsPlugin. "
+ "Support for this will be removed in the v3.0.0 release!",
+ DeprecationWarning,
+ stacklevel=3,
+ )
+
+ method: property | cached_property[Any] | Callable[..., Any]
+ for name, method in inspect.getmembers(
+ MetadataSourcePlugin,
+ predicate=lambda f: ( # type: ignore[arg-type]
+ (
+ isinstance(f, (property, cached_property))
+ and not hasattr(
+ BeetsPlugin,
+ getattr(f, "attrname", None) or f.fget.__name__, # type: ignore[union-attr]
+ )
+ )
+ or (
+ inspect.isfunction(f)
+ and f.__name__
+ and not getattr(f, "__isabstractmethod__", False)
+ and not hasattr(BeetsPlugin, f.__name__)
+ )
+ ),
+ ):
+ setattr(cls, name, method)
+
def __init__(self, name: str | None = None):
"""Perform one-time plugin setup."""
@@ -181,6 +236,37 @@ class BeetsPlugin(metaclass=abc.ABCMeta):
if not any(isinstance(f, PluginLogFilter) for f in self._log.filters):
self._log.addFilter(PluginLogFilter(self))
+ # In order to verify the config we need to make sure the plugin is fully
+ # configured (plugins usually add the default configuration *after*
+ # calling super().__init__()).
+ self.register_listener("pluginload", self._verify_config)
+
+ def _verify_config(self, *_, **__) -> None:
+ """Verify plugin configuration.
+
+ If deprecated 'source_weight' option is explicitly set by the user, they
+ will see a warning in the logs. Otherwise, this must be configured by
+ a third-party plugin, thus we raise a deprecation warning which won't be
+ shown to the user but will be visible to plugin developers.
+ """
+ # TODO: Remove in v3.0.0
+ if (
+ not hasattr(self, "data_source")
+ or "source_weight" not in self.config
+ ):
+ return
+
+ message = (
+ "'source_weight' configuration option is deprecated and will be"
+ " removed in v3.0.0. Use 'data_source_mismatch_penalty' instead"
+ )
+ for source in self.config.root().sources:
+ if "source_weight" in (source.get(self.name) or {}):
+ if source.filename: # user config
+ self._log.warning(message)
+ else: # 3rd-party plugin config
+ warnings.warn(message, DeprecationWarning, stacklevel=0)
+
def commands(self) -> Sequence[Subcommand]:
"""Should return a list of beets.ui.Subcommand objects for
commands that should be added to beets' CLI.
@@ -347,14 +433,20 @@ def _get_plugin(name: str) -> BeetsPlugin | None:
Attempts to import the plugin module, locate the appropriate plugin class
within it, and return an instance. Handles import failures gracefully and
logs warnings for missing plugins or loading errors.
+
+ Note we load the *last* plugin class found in the plugin namespace. This
+ allows plugins to define helper classes that inherit from BeetsPlugin
+ without those being loaded as the main plugin class.
+
+ Returns None if the plugin could not be loaded for any reason.
"""
try:
try:
- namespace = __import__(f"{PLUGIN_NAMESPACE}.{name}", None, None)
+ namespace = import_module(f"{PLUGIN_NAMESPACE}.{name}")
except Exception as exc:
raise PluginImportError(name) from exc
- for obj in getattr(namespace, name).__dict__.values():
+ for obj in reversed(namespace.__dict__.values()):
if (
inspect.isclass(obj)
and not isinstance(
@@ -363,6 +455,12 @@ def _get_plugin(name: str) -> BeetsPlugin | None:
and issubclass(obj, BeetsPlugin)
and obj != BeetsPlugin
and not inspect.isabstract(obj)
+ # Only consider this plugin's module or submodules to avoid
+ # conflicts when plugins import other BeetsPlugin classes
+ and (
+ obj.__module__ == namespace.__name__
+ or obj.__module__.startswith(f"{namespace.__name__}.")
+ )
):
return obj()
@@ -384,7 +482,7 @@ def load_plugins() -> None:
"""
if not _instances:
names = get_plugin_names()
- log.info("Loading plugins: {}", ", ".join(sorted(names)))
+ log.debug("Loading plugins: {}", ", ".join(sorted(names)))
_instances.extend(filter(None, map(_get_plugin, names)))
send("pluginload")
@@ -424,9 +522,9 @@ def types(model_cls: type[AnyModel]) -> dict[str, Type]:
for field in plugin_types:
if field in types and plugin_types[field] != types[field]:
raise PluginConflictError(
- "Plugin {} defines flexible field {} "
+ f"Plugin {plugin.name} defines flexible field {field} "
"which has already been defined with "
- "another type.".format(plugin.name, field)
+ "another type."
)
types.update(plugin_types)
return types
@@ -543,7 +641,7 @@ def send(event: EventType, **arguments: Any) -> list[Any]:
Return a list of non-None values returned from the handlers.
"""
- log.debug("Sending event: {0}", event)
+ log.debug("Sending event: {}", event)
return [
r
for handler in BeetsPlugin.listeners[event]
@@ -551,17 +649,21 @@ def send(event: EventType, **arguments: Any) -> list[Any]:
]
-def feat_tokens(for_artist: bool = True) -> str:
+def feat_tokens(
+ for_artist: bool = True, custom_words: list[str] | None = None
+) -> str:
"""Return a regular expression that matches phrases like "featuring"
that separate a main artist or a song title from secondary artists.
The `for_artist` option determines whether the regex should be
suitable for matching artist fields (the default) or title fields.
"""
feat_words = ["ft", "featuring", "feat", "feat.", "ft."]
+ if isinstance(custom_words, list):
+ feat_words += custom_words
if for_artist:
feat_words += ["with", "vs", "and", "con", "&"]
- return r"(?<=[\s(\[])(?:{})(?=\s)".format(
- "|".join(re.escape(x) for x in feat_words)
+ return (
+ rf"(?<=[\s(\[])(?:{'|'.join(re.escape(x) for x in feat_words)})(?=\s)"
)
diff --git a/beets/test/_common.py b/beets/test/_common.py
index d70f9ec80..ffb2bfd65 100644
--- a/beets/test/_common.py
+++ b/beets/test/_common.py
@@ -153,7 +153,7 @@ class DummyIn:
self.out = out
def add(self, s):
- self.buf.append(s + "\n")
+ self.buf.append(f"{s}\n")
def close(self):
pass
diff --git a/beets/test/helper.py b/beets/test/helper.py
index f1633c110..ea08ec840 100644
--- a/beets/test/helper.py
+++ b/beets/test/helper.py
@@ -58,7 +58,6 @@ from beets.ui.commands import TerminalImportSession
from beets.util import (
MoveOperation,
bytestring_path,
- cached_classproperty,
clean_module_tempdir,
syspath,
)
@@ -267,7 +266,7 @@ class TestHelper(ConfigMixin):
The item is attached to the database from `self.lib`.
"""
values_ = {
- "title": "t\u00eftle {0}",
+ "title": "t\u00eftle {}",
"artist": "the \u00e4rtist",
"album": "the \u00e4lbum",
"track": 1,
@@ -278,7 +277,7 @@ class TestHelper(ConfigMixin):
values_["db"] = self.lib
item = Item(**values_)
if "path" not in values:
- item["path"] = "audio." + item["format"].lower()
+ item["path"] = f"audio.{item['format'].lower()}"
# mtime needs to be set last since other assignments reset it.
item.mtime = 12345
return item
@@ -310,7 +309,7 @@ class TestHelper(ConfigMixin):
item = self.create_item(**values)
extension = item["format"].lower()
item["path"] = os.path.join(
- _common.RSRC, util.bytestring_path("min." + extension)
+ _common.RSRC, util.bytestring_path(f"min.{extension}")
)
item.add(self.lib)
item.move(operation=MoveOperation.COPY)
@@ -325,7 +324,7 @@ class TestHelper(ConfigMixin):
"""Add a number of items with files to the database."""
# TODO base this on `add_item()`
items = []
- path = os.path.join(_common.RSRC, util.bytestring_path("full." + ext))
+ path = os.path.join(_common.RSRC, util.bytestring_path(f"full.{ext}"))
for i in range(count):
item = Item.from_path(path)
item.album = f"\u00e4lbum {i}" # Check unicode paths
@@ -372,7 +371,7 @@ class TestHelper(ConfigMixin):
specified extension a cover art image is added to the media
file.
"""
- src = os.path.join(_common.RSRC, util.bytestring_path("full." + ext))
+ src = os.path.join(_common.RSRC, util.bytestring_path(f"full.{ext}"))
handle, path = mkstemp(dir=self.temp_dir)
path = bytestring_path(path)
os.close(handle)
@@ -495,7 +494,6 @@ class PluginMixin(ConfigMixin):
# FIXME this should eventually be handled by a plugin manager
plugins = (self.plugin,) if hasattr(self, "plugin") else plugins
self.config["plugins"] = plugins
- cached_classproperty.cache.clear()
beets.plugins.load_plugins()
def unload_plugins(self) -> None:
@@ -570,7 +568,7 @@ class ImportHelper(TestHelper):
medium = MediaFile(track_path)
medium.update(
{
- "album": "Tag Album" + (f" {album_id}" if album_id else ""),
+ "album": f"Tag Album{f' {album_id}' if album_id else ''}",
"albumartist": None,
"mb_albumid": None,
"comp": None,
@@ -831,23 +829,21 @@ class AutotagStub:
def _make_track_match(self, artist, album, number):
return TrackInfo(
- title="Applied Track %d" % number,
- track_id="match %d" % number,
+ title=f"Applied Track {number}",
+ track_id=f"match {number}",
artist=artist,
length=1,
index=0,
)
def _make_album_match(self, artist, album, tracks, distance=0, missing=0):
- if distance:
- id = " " + "M" * distance
- else:
- id = ""
+ id = f" {'M' * distance}" if distance else ""
+
if artist is None:
artist = "Various Artists"
else:
- artist = artist.replace("Tag", "Applied") + id
- album = album.replace("Tag", "Applied") + id
+ artist = f"{artist.replace('Tag', 'Applied')}{id}"
+ album = f"{album.replace('Tag', 'Applied')}{id}"
track_infos = []
for i in range(tracks - missing):
@@ -858,8 +854,8 @@ class AutotagStub:
album=album,
tracks=track_infos,
va=False,
- album_id="albumid" + id,
- artist_id="artistid" + id,
+ album_id=f"albumid{id}",
+ artist_id=f"artistid{id}",
albumtype="soundtrack",
data_source="match_source",
bandcamp_album_id="bc_url",
@@ -885,7 +881,7 @@ class FetchImageHelper:
super().run(*args, **kwargs)
IMAGEHEADER: dict[str, bytes] = {
- "image/jpeg": b"\xff\xd8\xff" + b"\x00" * 3 + b"JFIF",
+ "image/jpeg": b"\xff\xd8\xff\x00\x00\x00JFIF",
"image/png": b"\211PNG\r\n\032\n",
"image/gif": b"GIF89a",
# dummy type that is definitely not a valid image content type
diff --git a/beets/ui/__init__.py b/beets/ui/__init__.py
index 01030a977..fe980bb5c 100644
--- a/beets/ui/__init__.py
+++ b/beets/ui/__init__.py
@@ -23,14 +23,16 @@ import errno
import optparse
import os.path
import re
+import shutil
import sqlite3
-import struct
import sys
import textwrap
import traceback
import warnings
from difflib import SequenceMatcher
-from typing import Any, Callable
+from functools import cache
+from itertools import chain
+from typing import Any, Callable, Literal
import confuse
@@ -125,7 +127,7 @@ def print_(*strings: str, end: str = "\n") -> None:
The `end` keyword argument behaves similarly to the built-in `print`
(it defaults to a newline).
"""
- txt = " ".join(strings or ("",)) + end
+ txt = f"{' '.join(strings or ('',))}{end}"
# Encode the string and write it to stdout.
# On Python 3, sys.stdout expects text strings and uses the
@@ -269,7 +271,7 @@ def input_options(
)
):
# The first option is the default; mark it.
- show_letter = "[%s]" % found_letter.upper()
+ show_letter = f"[{found_letter.upper()}]"
is_default = True
else:
show_letter = found_letter.upper()
@@ -308,9 +310,9 @@ def input_options(
if isinstance(default, int):
default_name = str(default)
default_name = colorize("action_default", default_name)
- tmpl = "# selection (default %s)"
- prompt_parts.append(tmpl % default_name)
- prompt_part_lengths.append(len(tmpl % str(default)))
+ tmpl = "# selection (default {})"
+ prompt_parts.append(tmpl.format(default_name))
+ prompt_part_lengths.append(len(tmpl) - 2 + len(str(default)))
else:
prompt_parts.append("# selection")
prompt_part_lengths.append(len(prompt_parts[-1]))
@@ -338,7 +340,7 @@ def input_options(
if line_length != 0:
# Not the beginning of the line; need a space.
- part = " " + part
+ part = f" {part}"
length += 1
prompt += part
@@ -349,8 +351,8 @@ def input_options(
if not fallback_prompt:
fallback_prompt = "Enter one of "
if numrange:
- fallback_prompt += "%i-%i, " % numrange
- fallback_prompt += ", ".join(display_letters) + ":"
+ fallback_prompt += "{}-{}, ".format(*numrange)
+ fallback_prompt += f"{', '.join(display_letters)}:"
resp = input_(prompt)
while True:
@@ -406,7 +408,7 @@ def input_select_objects(prompt, objs, rep, prompt_all=None):
objects individually.
"""
choice = input_options(
- ("y", "n", "s"), False, "%s? (Yes/no/select)" % (prompt_all or prompt)
+ ("y", "n", "s"), False, f"{prompt_all or prompt}? (Yes/no/select)"
)
print() # Blank line.
@@ -420,7 +422,7 @@ def input_select_objects(prompt, objs, rep, prompt_all=None):
answer = input_options(
("y", "n", "q"),
True,
- "%s? (yes/no/quit)" % prompt,
+ f"{prompt}? (yes/no/quit)",
"Enter Y or N:",
)
if answer == "y":
@@ -438,7 +440,7 @@ def input_select_objects(prompt, objs, rep, prompt_all=None):
# ANSI terminal colorization code heavily inspired by pygments:
# https://bitbucket.org/birkenfeld/pygments-main/src/default/pygments/console.py
# (pygments is by Tim Hatch, Armin Ronacher, et al.)
-COLOR_ESCAPE = "\x1b["
+COLOR_ESCAPE = "\x1b"
LEGACY_COLORS = {
"black": ["black"],
"darkred": ["red"],
@@ -463,7 +465,7 @@ LEGACY_COLORS = {
"white": ["bold", "white"],
}
# All ANSI Colors.
-ANSI_CODES = {
+CODE_BY_COLOR = {
# Styles.
"normal": 0,
"bold": 1,
@@ -494,11 +496,17 @@ ANSI_CODES = {
"bg_cyan": 46,
"bg_white": 47,
}
-RESET_COLOR = COLOR_ESCAPE + "39;49;00m"
-
-# These abstract COLOR_NAMES are lazily mapped on to the actual color in COLORS
-# as they are defined in the configuration files, see function: colorize
-COLOR_NAMES = [
+RESET_COLOR = f"{COLOR_ESCAPE}[39;49;00m"
+# Precompile common ANSI-escape regex patterns
+ANSI_CODE_REGEX = re.compile(rf"({COLOR_ESCAPE}\[[;0-9]*m)")
+ESC_TEXT_REGEX = re.compile(
+ rf"""(?P<pretext>[^{COLOR_ESCAPE}]*)
+ (?P<esc>(?:{ANSI_CODE_REGEX.pattern})+)
+ (?P<text>[^{COLOR_ESCAPE}]+)(?P<reset>{re.escape(RESET_COLOR)})
+ (?P<posttext>[^{COLOR_ESCAPE}]*)""",
+ re.VERBOSE,
+)
+ColorName = Literal[
"text_success",
"text_warning",
"text_error",
@@ -507,76 +515,54 @@ COLOR_NAMES = [
"action_default",
"action",
# New Colors
- "text",
"text_faint",
"import_path",
"import_path_items",
"action_description",
- "added",
- "removed",
"changed",
- "added_highlight",
- "removed_highlight",
- "changed_highlight",
"text_diff_added",
"text_diff_removed",
- "text_diff_changed",
]
-COLORS: dict[str, list[str]] | None = None
-def _colorize(color, text):
- """Returns a string that prints the given text in the given color
- in a terminal that is ANSI color-aware. The color must be a list of strings
- from ANSI_CODES.
+@cache
+def get_color_config() -> dict[ColorName, str]:
+ """Parse and validate color configuration, converting names to ANSI codes.
+
+ Processes the UI color configuration, handling both new list format and
+ legacy single-color format. Validates all color names against known codes
+ and raises an error for any invalid entries.
"""
- # Construct escape sequence to be put before the text by iterating
- # over all "ANSI codes" in `color`.
- escape = ""
- for code in color:
- escape = escape + COLOR_ESCAPE + "%im" % ANSI_CODES[code]
- return escape + text + RESET_COLOR
+ colors_by_color_name: dict[ColorName, list[str]] = {
+ k: (v if isinstance(v, list) else LEGACY_COLORS.get(v, [v]))
+ for k, v in config["ui"]["colors"].flatten().items()
+ }
+
+ if invalid_colors := (
+ set(chain.from_iterable(colors_by_color_name.values()))
+ - CODE_BY_COLOR.keys()
+ ):
+ raise UserError(
+ f"Invalid color(s) in configuration: {', '.join(invalid_colors)}"
+ )
+
+ return {
+ n: ";".join(str(CODE_BY_COLOR[c]) for c in colors)
+ for n, colors in colors_by_color_name.items()
+ }
-def colorize(color_name, text):
- """Colorize text if colored output is enabled. (Like _colorize but
- conditional.)
+def colorize(color_name: ColorName, text: str) -> str:
+ """Apply ANSI color formatting to text based on configuration settings.
+
+ Returns colored text when color output is enabled and NO_COLOR environment
+ variable is not set, otherwise returns plain text unchanged.
"""
if config["ui"]["color"] and "NO_COLOR" not in os.environ:
- global COLORS
- if not COLORS:
- # Read all color configurations and set global variable COLORS.
- COLORS = dict()
- for name in COLOR_NAMES:
- # Convert legacy color definitions (strings) into the new
- # list-based color definitions. Do this by trying to read the
- # color definition from the configuration as unicode - if this
- # is successful, the color definition is a legacy definition
- # and has to be converted.
- try:
- color_def = config["ui"]["colors"][name].get(str)
- except (confuse.ConfigTypeError, NameError):
- # Normal color definition (type: list of unicode).
- color_def = config["ui"]["colors"][name].get(list)
- else:
- # Legacy color definition (type: unicode). Convert.
- if color_def in LEGACY_COLORS:
- color_def = LEGACY_COLORS[color_def]
- else:
- raise UserError("no such color %s", color_def)
- for code in color_def:
- if code not in ANSI_CODES.keys():
- raise ValueError("no such ANSI code %s", code)
- COLORS[name] = color_def
- # In case a 3rd party plugin is still passing the actual color ('red')
- # instead of the abstract color name ('text_error')
- color = COLORS.get(color_name)
- if not color:
- log.debug("Invalid color_name: {0}", color_name)
- color = color_name
- return _colorize(color, text)
- else:
- return text
+ color_code = get_color_config()[color_name]
+ return f"{COLOR_ESCAPE}[{color_code}m{text}{RESET_COLOR}"
+
+ return text
def uncolorize(colored_text):
@@ -589,26 +575,22 @@ def uncolorize(colored_text):
# [;\d]* - matches a sequence consisting of one or more digits or
# semicola
# [A-Za-z] - matches a letter
- ansi_code_regex = re.compile(r"\x1b\[[;\d]*[A-Za-z]", re.VERBOSE)
- # Strip ANSI codes from `colored_text` using the regular expression.
- text = ansi_code_regex.sub("", colored_text)
- return text
+ return ANSI_CODE_REGEX.sub("", colored_text)
def color_split(colored_text, index):
- ansi_code_regex = re.compile(r"(\x1b\[[;\d]*[A-Za-z])", re.VERBOSE)
length = 0
pre_split = ""
post_split = ""
found_color_code = None
found_split = False
- for part in ansi_code_regex.split(colored_text):
+ for part in ANSI_CODE_REGEX.split(colored_text):
# Count how many real letters we have passed
length += color_len(part)
if found_split:
post_split += part
else:
- if ansi_code_regex.match(part):
+ if ANSI_CODE_REGEX.match(part):
# This is a color code
if part == RESET_COLOR:
found_color_code = None
@@ -621,8 +603,8 @@ def color_split(colored_text, index):
split_index = index - (length - color_len(part))
found_split = True
if found_color_code:
- pre_split += part[:split_index] + RESET_COLOR
- post_split += found_color_code + part[split_index:]
+ pre_split += f"{part[:split_index]}{RESET_COLOR}"
+ post_split += f"{found_color_code}{part[split_index:]}"
else:
pre_split += part[:split_index]
post_split += part[split_index:]
@@ -642,7 +624,7 @@ def color_len(colored_text):
return len(uncolorize(colored_text))
-def _colordiff(a, b):
+def _colordiff(a: Any, b: Any) -> tuple[str, str]:
"""Given two values, return the same pair of strings except with
their differences highlighted in the specified color. Strings are
highlighted intelligently to show differences; other values are
@@ -664,35 +646,21 @@ def _colordiff(a, b):
colorize("text_diff_added", str(b)),
)
- a_out = []
- b_out = []
+ before = ""
+ after = ""
matcher = SequenceMatcher(lambda x: False, a, b)
for op, a_start, a_end, b_start, b_end in matcher.get_opcodes():
- if op == "equal":
- # In both strings.
- a_out.append(a[a_start:a_end])
- b_out.append(b[b_start:b_end])
- elif op == "insert":
- # Right only.
- b_out.append(colorize("text_diff_added", b[b_start:b_end]))
- elif op == "delete":
- # Left only.
- a_out.append(colorize("text_diff_removed", a[a_start:a_end]))
- elif op == "replace":
- # Right and left differ. Colorise with second highlight if
- # it's just a case change.
- if a[a_start:a_end].lower() != b[b_start:b_end].lower():
- a_color = "text_diff_removed"
- b_color = "text_diff_added"
- else:
- a_color = b_color = "text_highlight_minor"
- a_out.append(colorize(a_color, a[a_start:a_end]))
- b_out.append(colorize(b_color, b[b_start:b_end]))
- else:
- assert False
+ before_part, after_part = a[a_start:a_end], b[b_start:b_end]
+ if op in {"delete", "replace"}:
+ before_part = colorize("text_diff_removed", before_part)
+ if op in {"insert", "replace"}:
+ after_part = colorize("text_diff_added", after_part)
- return "".join(a_out), "".join(b_out)
+ before += before_part
+ after += after_part
+
+ return before, after
def colordiff(a, b):
@@ -726,32 +694,16 @@ def get_replacements():
replacements.append((re.compile(pattern), repl))
except re.error:
raise UserError(
- "malformed regular expression in replace: {}".format(pattern)
+ f"malformed regular expression in replace: {pattern}"
)
return replacements
-def term_width():
+@cache
+def term_width() -> int:
"""Get the width (columns) of the terminal."""
- fallback = config["ui"]["terminal_width"].get(int)
-
- # The fcntl and termios modules are not available on non-Unix
- # platforms, so we fall back to a constant.
- try:
- import fcntl
- import termios
- except ImportError:
- return fallback
-
- try:
- buf = fcntl.ioctl(0, termios.TIOCGWINSZ, " " * 4)
- except OSError:
- return fallback
- try:
- height, width = struct.unpack("hh", buf)
- except struct.error:
- return fallback
- return width
+ columns, _ = shutil.get_terminal_size(fallback=(0, 0))
+ return columns if columns else config["ui"]["terminal_width"].get(int)
def split_into_lines(string, width_tuple):
@@ -765,19 +717,13 @@ def split_into_lines(string, width_tuple):
"""
first_width, middle_width, last_width = width_tuple
words = []
- esc_text = re.compile(
- r"""(?P[^\x1b]*)
- (?P(?:\x1b\[[;\d]*[A-Za-z])+)
- (?P[^\x1b]+)(?P\x1b\[39;49;00m)
- (?P[^\x1b]*)""",
- re.VERBOSE,
- )
+
if uncolorize(string) == string:
# No colors in string
words = string.split()
else:
# Use a regex to find escapes and the text within them.
- for m in esc_text.finditer(string):
+ for m in ESC_TEXT_REGEX.finditer(string):
# m contains four groups:
# pretext - any text before escape sequence
# esc - intitial escape sequence
@@ -806,17 +752,17 @@ def split_into_lines(string, width_tuple):
# Colorize each word with pre/post escapes
# Reconstruct colored words
words += [
- m.group("esc") + raw_word + RESET_COLOR
+ f"{m['esc']}{raw_word}{RESET_COLOR}"
for raw_word in raw_words
]
elif raw_words:
# Pretext stops mid-word
if m.group("esc") != RESET_COLOR:
# Add the rest of the current word, with a reset after it
- words[-1] += m.group("esc") + raw_words[0] + RESET_COLOR
+ words[-1] += f"{m['esc']}{raw_words[0]}{RESET_COLOR}"
# Add the subsequent colored words:
words += [
- m.group("esc") + raw_word + RESET_COLOR
+ f"{m['esc']}{raw_word}{RESET_COLOR}"
for raw_word in raw_words[1:]
]
else:
@@ -907,18 +853,12 @@ def print_column_layout(
With subsequent lines (i.e. {lhs1}, {rhs1} onwards) being the
rest of contents, wrapped if the width would be otherwise exceeded.
"""
- if right["prefix"] + right["contents"] + right["suffix"] == "":
+ if f"{right['prefix']}{right['contents']}{right['suffix']}" == "":
# No right hand information, so we don't need a separator.
separator = ""
first_line_no_wrap = (
- indent_str
- + left["prefix"]
- + left["contents"]
- + left["suffix"]
- + separator
- + right["prefix"]
- + right["contents"]
- + right["suffix"]
+ f"{indent_str}{left['prefix']}{left['contents']}{left['suffix']}"
+ f"{separator}{right['prefix']}{right['contents']}{right['suffix']}"
)
if color_len(first_line_no_wrap) < max_width:
# Everything fits, print out line.
@@ -1044,18 +984,12 @@ def print_newline_layout(
If {lhs0} would go over the maximum width, the subsequent lines are
indented a second time for ease of reading.
"""
- if right["prefix"] + right["contents"] + right["suffix"] == "":
+ if f"{right['prefix']}{right['contents']}{right['suffix']}" == "":
# No right hand information, so we don't need a separator.
separator = ""
first_line_no_wrap = (
- indent_str
- + left["prefix"]
- + left["contents"]
- + left["suffix"]
- + separator
- + right["prefix"]
- + right["contents"]
- + right["suffix"]
+ f"{indent_str}{left['prefix']}{left['contents']}{left['suffix']}"
+ f"{separator}{right['prefix']}{right['contents']}{right['suffix']}"
)
if color_len(first_line_no_wrap) < max_width:
# Everything fits, print out line.
@@ -1069,7 +1003,7 @@ def print_newline_layout(
empty_space - len(indent_str),
empty_space - len(indent_str),
)
- left_str = left["prefix"] + left["contents"] + left["suffix"]
+ left_str = f"{left['prefix']}{left['contents']}{left['suffix']}"
left_split = split_into_lines(left_str, left_width_tuple)
# Repeat calculations for rhs, including separator on first line
right_width_tuple = (
@@ -1077,19 +1011,19 @@ def print_newline_layout(
empty_space - len(indent_str),
empty_space - len(indent_str),
)
- right_str = right["prefix"] + right["contents"] + right["suffix"]
+ right_str = f"{right['prefix']}{right['contents']}{right['suffix']}"
right_split = split_into_lines(right_str, right_width_tuple)
for i, line in enumerate(left_split):
if i == 0:
- print_(indent_str + line)
+ print_(f"{indent_str}{line}")
elif line != "":
# Ignore empty lines
- print_(indent_str * 2 + line)
+ print_(f"{indent_str * 2}{line}")
for i, line in enumerate(right_split):
if i == 0:
- print_(indent_str + separator + line)
+ print_(f"{indent_str}{separator}{line}")
elif line != "":
- print_(indent_str * 2 + line)
+ print_(f"{indent_str * 2}{line}")
FLOAT_EPSILON = 0.01
@@ -1122,13 +1056,15 @@ def _field_diff(field, old, old_fmt, new, new_fmt):
if isinstance(oldval, str):
oldstr, newstr = colordiff(oldval, newstr)
else:
- oldstr = colorize("text_error", oldstr)
- newstr = colorize("text_error", newstr)
+ oldstr = colorize("text_diff_removed", oldstr)
+ newstr = colorize("text_diff_added", newstr)
return f"{oldstr} -> {newstr}"
-def show_model_changes(new, old=None, fields=None, always=False):
+def show_model_changes(
+ new, old=None, fields=None, always=False, print_obj: bool = True
+):
"""Given a Model object, print a list of changes from its pristine
version stored in the database. Return a boolean indicating whether
any changes were found.
@@ -1163,11 +1099,11 @@ def show_model_changes(new, old=None, fields=None, always=False):
continue
changes.append(
- " {}: {}".format(field, colorize("text_highlight", new_fmt[field]))
+ f" {field}: {colorize('text_highlight', new_fmt[field])}"
)
# Print changes.
- if changes or always:
+ if print_obj and (changes or always):
print_(format(old))
if changes:
print_("\n".join(changes))
@@ -1204,22 +1140,16 @@ def show_path_changes(path_changes):
# Print every change over two lines
for source, dest in zip(sources, destinations):
color_source, color_dest = colordiff(source, dest)
- print_("{0} \n -> {1}".format(color_source, color_dest))
+ print_(f"{color_source} \n -> {color_dest}")
else:
# Print every change on a single line, and add a header
title_pad = max_width - len("Source ") + len(" -> ")
- print_("Source {0} Destination".format(" " * title_pad))
+ print_(f"Source {' ' * title_pad} Destination")
for source, dest in zip(sources, destinations):
pad = max_width - len(source)
color_source, color_dest = colordiff(source, dest)
- print_(
- "{0} {1} -> {2}".format(
- color_source,
- " " * pad,
- color_dest,
- )
- )
+ print_(f"{color_source} {' ' * pad} -> {color_dest}")
# Helper functions for option parsing.
@@ -1245,9 +1175,7 @@ def _store_dict(option, opt_str, value, parser):
raise ValueError
except ValueError:
raise UserError(
- "supplied argument `{}' is not of the form `key=value'".format(
- value
- )
+ f"supplied argument `{value}' is not of the form `key=value'"
)
option_values[key] = value
@@ -1426,8 +1354,8 @@ class Subcommand:
@root_parser.setter
def root_parser(self, root_parser):
self._root_parser = root_parser
- self.parser.prog = "{} {}".format(
- as_string(root_parser.get_prog_name()), self.name
+ self.parser.prog = (
+ f"{as_string(root_parser.get_prog_name())} {self.name}"
)
@@ -1483,7 +1411,7 @@ class SubcommandsOptionParser(CommonOptionsParser):
for subcommand in subcommands:
name = subcommand.name
if subcommand.aliases:
- name += " (%s)" % ", ".join(subcommand.aliases)
+ name += f" ({', '.join(subcommand.aliases)})"
disp_names.append(name)
# Set the help position based on the max width.
@@ -1496,32 +1424,24 @@ class SubcommandsOptionParser(CommonOptionsParser):
# Lifted directly from optparse.py.
name_width = help_position - formatter.current_indent - 2
if len(name) > name_width:
- name = "%*s%s\n" % (formatter.current_indent, "", name)
+ name = f"{' ' * formatter.current_indent}{name}\n"
indent_first = help_position
else:
- name = "%*s%-*s " % (
- formatter.current_indent,
- "",
- name_width,
- name,
- )
+ name = f"{' ' * formatter.current_indent}{name:<{name_width}}  "
indent_first = 0
result.append(name)
help_width = formatter.width - help_position
help_lines = textwrap.wrap(subcommand.help, help_width)
help_line = help_lines[0] if help_lines else ""
- result.append("%*s%s\n" % (indent_first, "", help_line))
+ result.append(f"{' ' * indent_first}{help_line}\n")
result.extend(
- [
- "%*s%s\n" % (help_position, "", line)
- for line in help_lines[1:]
- ]
+ [f"{' ' * help_position}{line}\n" for line in help_lines[1:]]
)
formatter.dedent()
# Concatenate the original help message with the subcommand
# list.
- return out + "".join(result)
+ return f"{out}{''.join(result)}"
def _subcommand_for_name(self, name):
"""Return the subcommand in self.subcommands matching the
@@ -1615,19 +1535,19 @@ def _configure(options):
if overlay_path:
log.debug(
- "overlaying configuration: {0}", util.displayable_path(overlay_path)
+ "overlaying configuration: {}", util.displayable_path(overlay_path)
)
config_path = config.user_config_path()
if os.path.isfile(config_path):
- log.debug("user configuration: {0}", util.displayable_path(config_path))
+ log.debug("user configuration: {}", util.displayable_path(config_path))
else:
log.debug(
- "no user configuration found at {0}",
+ "no user configuration found at {}",
util.displayable_path(config_path),
)
- log.debug("data directory: {0}", util.displayable_path(config.config_dir()))
+ log.debug("data directory: {}", util.displayable_path(config.config_dir()))
return config
@@ -1637,10 +1557,8 @@ def _ensure_db_directory_exists(path):
newpath = os.path.dirname(path)
if not os.path.isdir(newpath):
if input_yn(
- "The database directory {} does not \
- exist. Create it (Y/n)?".format(
- util.displayable_path(newpath)
- )
+ f"The database directory {util.displayable_path(newpath)} does not"
+ " exist. Create it (Y/n)?"
):
os.makedirs(newpath)
@@ -1660,12 +1578,11 @@ def _open_library(config: confuse.LazyConfig) -> library.Library:
except (sqlite3.OperationalError, sqlite3.DatabaseError) as db_error:
log.debug("{}", traceback.format_exc())
raise UserError(
- "database file {} cannot not be opened: {}".format(
- util.displayable_path(dbpath), db_error
- )
+ f"database file {util.displayable_path(dbpath)} cannot not be"
+ f" opened: {db_error}"
)
log.debug(
- "library database: {0}\nlibrary directory: {1}",
+ "library database: {}\nlibrary directory: {}",
util.displayable_path(lib.path),
util.displayable_path(lib.directory),
)
@@ -1782,7 +1699,7 @@ def main(args=None):
_raw_main(args)
except UserError as exc:
message = exc.args[0] if exc.args else None
- log.error("error: {0}", message)
+ log.error("error: {}", message)
sys.exit(1)
except util.HumanReadableError as exc:
exc.log(log)
@@ -1794,10 +1711,10 @@ def main(args=None):
log.error("{}", exc)
sys.exit(1)
except confuse.ConfigError as exc:
- log.error("configuration error: {0}", exc)
+ log.error("configuration error: {}", exc)
sys.exit(1)
except db_query.InvalidQueryError as exc:
- log.error("invalid query: {0}", exc)
+ log.error("invalid query: {}", exc)
sys.exit(1)
except OSError as exc:
if exc.errno == errno.EPIPE:
@@ -1810,7 +1727,7 @@ def main(args=None):
log.debug("{}", traceback.format_exc())
except db.DBAccessError as exc:
log.error(
- "database access error: {0}\n"
+ "database access error: {}\n"
"the library file might have a permissions problem",
exc,
)
diff --git a/beets/ui/commands.py b/beets/ui/commands.py
index 12a8d6875..b52e965b7 100755
--- a/beets/ui/commands.py
+++ b/beets/ui/commands.py
@@ -18,8 +18,10 @@ interface.
import os
import re
+import textwrap
from collections import Counter
from collections.abc import Sequence
+from functools import cached_property
from itertools import chain
from platform import python_version
from typing import Any, NamedTuple
@@ -112,15 +114,11 @@ def _parse_logfiles(logfiles):
yield from _paths_from_logfile(syspath(normpath(logfile)))
except ValueError as err:
raise ui.UserError(
- "malformed logfile {}: {}".format(
- util.displayable_path(logfile), str(err)
- )
+ f"malformed logfile {util.displayable_path(logfile)}: {err}"
) from err
except OSError as err:
raise ui.UserError(
- "unreadable logfile {}: {}".format(
- util.displayable_path(logfile), str(err)
- )
+ f"unreadable logfile {util.displayable_path(logfile)}: {err}"
) from err
@@ -132,13 +130,13 @@ def _print_keys(query):
returned row, with indentation of 2 spaces.
"""
for row in query:
- print_(" " * 2 + row["key"])
+ print_(f" {row['key']}")
def fields_func(lib, opts, args):
def _print_rows(names):
names.sort()
- print_(" " + "\n ".join(names))
+ print_(textwrap.indent("\n".join(names), " "))
print_("Item fields:")
_print_rows(library.Item.all_keys())
@@ -148,13 +146,13 @@ def fields_func(lib, opts, args):
with lib.transaction() as tx:
# The SQL uses the DISTINCT to get unique values from the query
- unique_fields = "SELECT DISTINCT key FROM (%s)"
+ unique_fields = "SELECT DISTINCT key FROM ({})"
print_("Item flexible attributes:")
- _print_keys(tx.query(unique_fields % library.Item._flex_table))
+ _print_keys(tx.query(unique_fields.format(library.Item._flex_table)))
print_("Album flexible attributes:")
- _print_keys(tx.query(unique_fields % library.Album._flex_table))
+ _print_keys(tx.query(unique_fields.format(library.Album._flex_table)))
fields_cmd = ui.Subcommand(
@@ -213,10 +211,10 @@ def get_singleton_disambig_fields(info: hooks.TrackInfo) -> Sequence[str]:
out = []
chosen_fields = config["match"]["singleton_disambig_fields"].as_str_seq()
calculated_values = {
- "index": "Index {}".format(str(info.index)),
- "track_alt": "Track {}".format(info.track_alt),
+ "index": f"Index {info.index}",
+ "track_alt": f"Track {info.track_alt}",
"album": (
- "[{}]".format(info.album)
+ f"[{info.album}]"
if (
config["import"]["singleton_album_disambig"].get()
and info.get("album")
@@ -242,7 +240,7 @@ def get_album_disambig_fields(info: hooks.AlbumInfo) -> Sequence[str]:
chosen_fields = config["match"]["album_disambig_fields"].as_str_seq()
calculated_values = {
"media": (
- "{}x{}".format(info.mediums, info.media)
+ f"{info.mediums}x{info.media}"
if (info.mediums and info.mediums > 1)
else info.media
),
@@ -277,7 +275,7 @@ def dist_string(dist):
"""Formats a distance (a float) as a colorized similarity percentage
string.
"""
- string = "{:.1f}%".format(((1 - dist) * 100))
+ string = f"{(1 - dist) * 100:.1f}%"
return dist_colorize(string, dist)
@@ -295,7 +293,7 @@ def penalty_string(distance, limit=None):
if limit and len(penalties) > limit:
penalties = penalties[:limit] + ["..."]
# Prefix penalty string with U+2260: Not Equal To
- penalty_string = "\u2260 {}".format(", ".join(penalties))
+ penalty_string = f"\u2260 {', '.join(penalties)}"
return ui.colorize("changed", penalty_string)
@@ -306,6 +304,10 @@ class ChangeRepresentation:
TrackMatch object, accordingly.
"""
+ @cached_property
+ def changed_prefix(self) -> str:
+ return ui.colorize("changed", "\u2260")
+
cur_artist = None
# cur_album set if album, cur_title set if singleton
cur_album = None
@@ -360,18 +362,18 @@ class ChangeRepresentation:
# 'Match' line and similarity.
print_(
- self.indent_header + f"Match ({dist_string(self.match.distance)}):"
+ f"{self.indent_header}Match ({dist_string(self.match.distance)}):"
)
if isinstance(self.match.info, autotag.hooks.AlbumInfo):
# Matching an album - print that
artist_album_str = (
- f"{self.match.info.artist}" + f" - {self.match.info.album}"
+ f"{self.match.info.artist} - {self.match.info.album}"
)
else:
# Matching a single track
artist_album_str = (
- f"{self.match.info.artist}" + f" - {self.match.info.title}"
+ f"{self.match.info.artist} - {self.match.info.title}"
)
print_(
self.indent_header
@@ -381,17 +383,17 @@ class ChangeRepresentation:
# Penalties.
penalties = penalty_string(self.match.distance)
if penalties:
- print_(self.indent_header + penalties)
+ print_(f"{self.indent_header}{penalties}")
# Disambiguation.
disambig = disambig_string(self.match.info)
if disambig:
- print_(self.indent_header + disambig)
+ print_(f"{self.indent_header}{disambig}")
# Data URL.
if self.match.info.data_url:
url = ui.colorize("text_faint", f"{self.match.info.data_url}")
- print_(self.indent_header + url)
+ print_(f"{self.indent_header}{url}")
def show_match_details(self):
"""Print out the details of the match, including changes in album name
@@ -404,9 +406,8 @@ class ChangeRepresentation:
artist_l, artist_r = "", ""
if artist_l != artist_r:
artist_l, artist_r = ui.colordiff(artist_l, artist_r)
- # Prefix with U+2260: Not Equal To
left = {
- "prefix": ui.colorize("changed", "\u2260") + " Artist: ",
+ "prefix": f"{self.changed_prefix} Artist: ",
"contents": artist_l,
"suffix": "",
}
@@ -414,7 +415,7 @@ class ChangeRepresentation:
self.print_layout(self.indent_detail, left, right)
else:
- print_(self.indent_detail + "*", "Artist:", artist_r)
+ print_(f"{self.indent_detail}*", "Artist:", artist_r)
if self.cur_album:
# Album
@@ -424,31 +425,29 @@ class ChangeRepresentation:
and self.match.info.album != VARIOUS_ARTISTS
):
album_l, album_r = ui.colordiff(album_l, album_r)
- # Prefix with U+2260: Not Equal To
left = {
- "prefix": ui.colorize("changed", "\u2260") + " Album: ",
+ "prefix": f"{self.changed_prefix} Album: ",
"contents": album_l,
"suffix": "",
}
right = {"prefix": "", "contents": album_r, "suffix": ""}
self.print_layout(self.indent_detail, left, right)
else:
- print_(self.indent_detail + "*", "Album:", album_r)
+ print_(f"{self.indent_detail}*", "Album:", album_r)
elif self.cur_title:
# Title - for singletons
title_l, title_r = self.cur_title or "", self.match.info.title
if self.cur_title != self.match.info.title:
title_l, title_r = ui.colordiff(title_l, title_r)
- # Prefix with U+2260: Not Equal To
left = {
- "prefix": ui.colorize("changed", "\u2260") + " Title: ",
+ "prefix": f"{self.changed_prefix} Title: ",
"contents": title_l,
"suffix": "",
}
right = {"prefix": "", "contents": title_r, "suffix": ""}
self.print_layout(self.indent_detail, left, right)
else:
- print_(self.indent_detail + "*", "Title:", title_r)
+ print_(f"{self.indent_detail}*", "Title:", title_r)
def make_medium_info_line(self, track_info):
"""Construct a line with the current medium's info."""
@@ -490,7 +489,6 @@ class ChangeRepresentation:
"""Format colored track indices."""
cur_track = self.format_index(item)
new_track = self.format_index(track_info)
- templ = "(#{})"
changed = False
# Choose color based on change.
if cur_track != new_track:
@@ -502,10 +500,8 @@ class ChangeRepresentation:
else:
highlight_color = "text_faint"
- cur_track = templ.format(cur_track)
- new_track = templ.format(new_track)
- lhs_track = ui.colorize(highlight_color, cur_track)
- rhs_track = ui.colorize(highlight_color, new_track)
+ lhs_track = ui.colorize(highlight_color, f"(#{cur_track})")
+ rhs_track = ui.colorize(highlight_color, f"(#{new_track})")
return lhs_track, rhs_track, changed
@staticmethod
@@ -573,11 +569,10 @@ class ChangeRepresentation:
# the case, thus the 'info' dictionary is unneeded.
# penalties = penalty_string(self.match.distance.tracks[track_info])
- prefix = ui.colorize("changed", "\u2260 ") if changed else "* "
lhs = {
- "prefix": prefix + lhs_track + " ",
+ "prefix": f"{self.changed_prefix if changed else '*'} {lhs_track} ",
"contents": lhs_title,
- "suffix": " " + lhs_length,
+ "suffix": f" {lhs_length}",
}
rhs = {"prefix": "", "contents": "", "suffix": ""}
if not changed:
@@ -586,9 +581,9 @@ class ChangeRepresentation:
else:
# Construct a dictionary for the "changed to" side
rhs = {
- "prefix": rhs_track + " ",
+ "prefix": f"{rhs_track} ",
"contents": rhs_title,
- "suffix": " " + rhs_length,
+ "suffix": f" {rhs_length}",
}
return (lhs, rhs)
@@ -681,7 +676,7 @@ class AlbumChange(ChangeRepresentation):
# Print tracks from previous medium
self.print_tracklist(lines)
lines = []
- print_(self.indent_detail + header)
+ print_(f"{self.indent_detail}{header}")
# Save new medium details for future comparison.
medium, disctitle = track_info.medium, track_info.disctitle
@@ -697,11 +692,9 @@ class AlbumChange(ChangeRepresentation):
# Missing and unmatched tracks.
if self.match.extra_tracks:
print_(
- "Missing tracks ({0}/{1} - {2:.1%}):".format(
- len(self.match.extra_tracks),
- len(self.match.info.tracks),
- len(self.match.extra_tracks) / len(self.match.info.tracks),
- )
+ "Missing tracks"
+ f" ({len(self.match.extra_tracks)}/{len(self.match.info.tracks)} -"
+ f" {len(self.match.extra_tracks) / len(self.match.info.tracks):.1%}):"
)
for track_info in self.match.extra_tracks:
line = f" ! {track_info.title} (#{self.format_index(track_info)})"
@@ -711,9 +704,9 @@ class AlbumChange(ChangeRepresentation):
if self.match.extra_items:
print_(f"Unmatched tracks ({len(self.match.extra_items)}):")
for item in self.match.extra_items:
- line = " ! {} (#{})".format(item.title, self.format_index(item))
+ line = f" ! {item.title} (#{self.format_index(item)})"
if item.length:
- line += " ({})".format(human_seconds_short(item.length))
+ line += f" ({human_seconds_short(item.length)})"
print_(ui.colorize("text_warning", line))
@@ -769,7 +762,7 @@ def summarize_items(items, singleton):
"""
summary_parts = []
if not singleton:
- summary_parts.append("{} items".format(len(items)))
+ summary_parts.append(f"{len(items)} items")
format_counts = {}
for item in items:
@@ -789,10 +782,11 @@ def summarize_items(items, singleton):
average_bitrate = sum([item.bitrate for item in items]) / len(items)
total_duration = sum([item.length for item in items])
total_filesize = sum([item.filesize for item in items])
- summary_parts.append("{}kbps".format(int(average_bitrate / 1000)))
+ summary_parts.append(f"{int(average_bitrate / 1000)}kbps")
if items[0].format == "FLAC":
- sample_bits = "{}kHz/{} bit".format(
- round(int(items[0].samplerate) / 1000, 1), items[0].bitdepth
+ sample_bits = (
+ f"{round(int(items[0].samplerate) / 1000, 1)}kHz"
+ f"/{items[0].bitdepth} bit"
)
summary_parts.append(sample_bits)
summary_parts.append(human_seconds_short(total_duration))
@@ -885,7 +879,7 @@ def choose_candidate(
if singleton:
print_("No matching recordings found.")
else:
- print_("No matching release found for {} tracks.".format(itemcount))
+ print_(f"No matching release found for {itemcount} tracks.")
print_(
"For help, see: "
"https://beets.readthedocs.org/en/latest/faq.html#nomatch"
@@ -910,40 +904,38 @@ def choose_candidate(
# Display list of candidates.
print_("")
print_(
- 'Finding tags for {} "{} - {}".'.format(
- "track" if singleton else "album",
- item.artist if singleton else cur_artist,
- item.title if singleton else cur_album,
- )
+ f"Finding tags for {'track' if singleton else 'album'} "
+ f'"{item.artist if singleton else cur_artist} -'
+ f' {item.title if singleton else cur_album}".'
)
- print_(ui.indent(2) + "Candidates:")
+ print_(" Candidates:")
for i, match in enumerate(candidates):
# Index, metadata, and distance.
- index0 = "{0}.".format(i + 1)
+ index0 = f"{i + 1}."
index = dist_colorize(index0, match.distance)
- dist = "({:.1f}%)".format((1 - match.distance) * 100)
+ dist = f"({(1 - match.distance) * 100:.1f}%)"
distance = dist_colorize(dist, match.distance)
- metadata = "{0} - {1}".format(
- match.info.artist,
- match.info.title if singleton else match.info.album,
+ metadata = (
+ f"{match.info.artist} -"
+ f" {match.info.title if singleton else match.info.album}"
)
if i == 0:
metadata = dist_colorize(metadata, match.distance)
else:
metadata = ui.colorize("text_highlight_minor", metadata)
line1 = [index, distance, metadata]
- print_(ui.indent(2) + " ".join(line1))
+ print_(f" {' '.join(line1)}")
# Penalties.
penalties = penalty_string(match.distance, 3)
if penalties:
- print_(ui.indent(13) + penalties)
+ print_(f"{' ' * 13}{penalties}")
# Disambiguation
disambig = disambig_string(match.info)
if disambig:
- print_(ui.indent(13) + disambig)
+ print_(f"{' ' * 13}{disambig}")
# Ask the user for a choice.
sel = ui.input_options(choice_opts, numrange=(1, len(candidates)))
@@ -1015,7 +1007,7 @@ def manual_id(session, task):
Input an ID, either for an album ("release") or a track ("recording").
"""
- prompt = "Enter {} ID:".format("release" if task.is_album else "recording")
+ prompt = f"Enter {'release' if task.is_album else 'recording'} ID:"
search_id = input_(prompt).strip()
if task.is_album:
@@ -1043,7 +1035,7 @@ class TerminalImportSession(importer.ImportSession):
path_str0 = displayable_path(task.paths, "\n")
path_str = ui.colorize("import_path", path_str0)
- items_str0 = "({} items)".format(len(task.items))
+ items_str0 = f"({len(task.items)} items)"
items_str = ui.colorize("import_path_items", items_str0)
print_(" ".join([path_str, items_str]))
@@ -1156,7 +1148,7 @@ class TerminalImportSession(importer.ImportSession):
that's already in the library.
"""
log.warning(
- "This {0} is already in the library!",
+ "This {} is already in the library!",
("album" if task.is_album else "item"),
)
@@ -1217,8 +1209,8 @@ class TerminalImportSession(importer.ImportSession):
def should_resume(self, path):
return ui.input_yn(
- "Import of the directory:\n{}\n"
- "was interrupted. Resume (Y/n)?".format(displayable_path(path))
+ f"Import of the directory:\n{displayable_path(path)}\n"
+ "was interrupted. Resume (Y/n)?"
)
def _get_choices(self, task):
@@ -1288,11 +1280,10 @@ class TerminalImportSession(importer.ImportSession):
dup_choices = [c for c in all_choices if c.short == short]
for c in dup_choices[1:]:
log.warning(
- "Prompt choice '{0}' removed due to conflict "
- "with '{1}' (short letter: '{2}')",
- c.long,
- dup_choices[0].long,
- c.short,
+ "Prompt choice '{0.long}' removed due to conflict "
+ "with '{1[0].long}' (short letter: '{0.short}')",
+ c,
+ dup_choices,
)
extra_choices.remove(c)
@@ -1317,7 +1308,8 @@ def import_files(lib, paths: list[bytes], query):
loghandler = logging.FileHandler(logpath, encoding="utf-8")
except OSError:
raise ui.UserError(
- f"Could not open log file for writing: {displayable_path(logpath)}"
+ "Could not open log file for writing:"
+ f" {displayable_path(logpath)}"
)
else:
loghandler = None
@@ -1362,9 +1354,7 @@ def import_func(lib, opts, args: list[str]):
for path in byte_paths:
if not os.path.exists(syspath(normpath(path))):
raise ui.UserError(
- "no such file or directory: {}".format(
- displayable_path(path)
- )
+ f"no such file or directory: {displayable_path(path)}"
)
# Check the directories from the logfiles, but don't throw an error in
@@ -1374,9 +1364,7 @@ def import_func(lib, opts, args: list[str]):
for path in paths_from_logfiles:
if not os.path.exists(syspath(normpath(path))):
log.warning(
- "No such file or directory: {}".format(
- displayable_path(path)
- )
+ "No such file or directory: {}", displayable_path(path)
)
continue
@@ -1650,9 +1638,8 @@ def update_items(lib, query, album, move, pretend, fields, exclude_fields=None):
# Did the item change since last checked?
if item.current_mtime() <= item.mtime:
log.debug(
- "skipping {0} because mtime is up to date ({1})",
- displayable_path(item.path),
- item.mtime,
+ "skipping {0.filepath} because mtime is up to date ({0.mtime})",
+ item,
)
continue
@@ -1660,9 +1647,7 @@ def update_items(lib, query, album, move, pretend, fields, exclude_fields=None):
try:
item.read()
except library.ReadError as exc:
- log.error(
- "error reading {0}: {1}", displayable_path(item.path), exc
- )
+ log.error("error reading {.filepath}: {}", item, exc)
continue
# Special-case album artist when it matches track artist. (Hacky
@@ -1703,7 +1688,7 @@ def update_items(lib, query, album, move, pretend, fields, exclude_fields=None):
continue
album = lib.get_album(album_id)
if not album: # Empty albums have already been removed.
- log.debug("emptied album {0}", album_id)
+ log.debug("emptied album {}", album_id)
continue
first_item = album.items().get()
@@ -1714,7 +1699,7 @@ def update_items(lib, query, album, move, pretend, fields, exclude_fields=None):
# Move album art (and any inconsistent items).
if move and lib.directory in ancestry(first_item.path):
- log.debug("moving album {0}", album_id)
+ log.debug("moving album {}", album_id)
# Manually moving and storing the album.
items = list(album.items())
@@ -1808,7 +1793,7 @@ def remove_items(lib, query, album, delete, force):
if not force:
# Prepare confirmation with user.
album_str = (
- " in {} album{}".format(len(albums), "s" if len(albums) > 1 else "")
+ f" in {len(albums)} album{'s' if len(albums) > 1 else ''}"
if album
else ""
)
@@ -1816,14 +1801,17 @@ def remove_items(lib, query, album, delete, force):
if delete:
fmt = "$path - $title"
prompt = "Really DELETE"
- prompt_all = "Really DELETE {} file{}{}".format(
- len(items), "s" if len(items) > 1 else "", album_str
+ prompt_all = (
+ "Really DELETE"
+ f" {len(items)} file{'s' if len(items) > 1 else ''}{album_str}"
)
else:
fmt = ""
prompt = "Really remove from the library?"
- prompt_all = "Really remove {} item{}{} from the library?".format(
- len(items), "s" if len(items) > 1 else "", album_str
+ prompt_all = (
+ "Really remove"
+ f" {len(items)} item{'s' if len(items) > 1 else ''}{album_str}"
+ " from the library?"
)
# Helpers for printing affected items
@@ -1892,7 +1880,7 @@ def show_stats(lib, query, exact):
try:
total_size += os.path.getsize(syspath(item.path))
except OSError as exc:
- log.info("could not get size of {}: {}", item.path, exc)
+ log.info("could not get size of {.path}: {}", item, exc)
else:
total_size += int(item.length * item.bitrate / 8)
total_time += item.length
@@ -1902,27 +1890,17 @@ def show_stats(lib, query, exact):
if item.album_id:
albums.add(item.album_id)
- size_str = "" + human_bytes(total_size)
+ size_str = human_bytes(total_size)
if exact:
size_str += f" ({total_size} bytes)"
- print_(
- """Tracks: {}
-Total time: {}{}
-{}: {}
-Artists: {}
-Albums: {}
-Album artists: {}""".format(
- total_items,
- human_seconds(total_time),
- f" ({total_time:.2f} seconds)" if exact else "",
- "Total size" if exact else "Approximate total size",
- size_str,
- len(artists),
- len(albums),
- len(album_artists),
- ),
- )
+ print_(f"""Tracks: {total_items}
+Total time: {human_seconds(total_time)}{f" ({total_time:.2f} seconds)" if exact else ""}
+{"Total size" if exact else "Approximate total size"}: {size_str}
+Artists: {len(artists)}
+Albums: {len(albums)}
+Album artists: {len(album_artists)}""")
def stats_func(lib, opts, args):
@@ -1943,7 +1921,7 @@ default_commands.append(stats_cmd)
def show_version(lib, opts, args):
- print_("beets version %s" % beets.__version__)
+ print_(f"beets version {beets.__version__}")
print_(f"Python version {python_version()}")
# Show plugins.
names = sorted(p.name for p in plugins.find_plugins())
@@ -1977,7 +1955,7 @@ def modify_items(lib, mods, dels, query, write, move, album, confirm, inherit):
# Apply changes *temporarily*, preview them, and collect modified
# objects.
- print_("Modifying {} {}s.".format(len(objs), "album" if album else "item"))
+ print_(f"Modifying {len(objs)} {'album' if album else 'item'}s.")
changed = []
templates = {
key: functemplate.template(value) for key, value in mods.items()
@@ -2007,7 +1985,7 @@ def modify_items(lib, mods, dels, query, write, move, album, confirm, inherit):
extra = ""
changed = ui.input_select_objects(
- "Really modify%s" % extra,
+ f"Really modify{extra}",
changed,
lambda o: print_and_modify(o, mods, dels),
)
@@ -2159,7 +2137,7 @@ def move_items(
act = "copy" if copy else "move"
entity = "album" if album else "item"
log.info(
- "{0} {1} {2}{3}{4}.",
+ "{} {} {}{}{}.",
action,
len(objs),
entity,
@@ -2185,7 +2163,7 @@ def move_items(
else:
if confirm:
objs = ui.input_select_objects(
- "Really %s" % act,
+ f"Really {act}",
objs,
lambda o: show_path_changes(
[(o.path, o.destination(basedir=dest))]
@@ -2193,7 +2171,7 @@ def move_items(
)
for obj in objs:
- log.debug("moving: {0}", util.displayable_path(obj.path))
+ log.debug("moving: {.filepath}", obj)
if export:
# Copy without affecting the database.
@@ -2213,9 +2191,7 @@ def move_func(lib, opts, args):
if dest is not None:
dest = normpath(dest)
if not os.path.isdir(syspath(dest)):
- raise ui.UserError(
- "no such directory: {}".format(displayable_path(dest))
- )
+ raise ui.UserError(f"no such directory: {displayable_path(dest)}")
move_items(
lib,
@@ -2278,16 +2254,14 @@ def write_items(lib, query, pretend, force):
for item in items:
# Item deleted?
if not os.path.exists(syspath(item.path)):
- log.info("missing file: {0}", util.displayable_path(item.path))
+ log.info("missing file: {.filepath}", item)
continue
# Get an Item object reflecting the "clean" (on-disk) state.
try:
clean_item = library.Item.from_path(item.path)
except library.ReadError as exc:
- log.error(
- "error reading {0}: {1}", displayable_path(item.path), exc
- )
+ log.error("error reading {.filepath}: {}", item, exc)
continue
# Check for and display changes.
@@ -2480,30 +2454,27 @@ def completion_script(commands):
yield "_beet() {\n"
# Command names
- yield " local commands='%s'\n" % " ".join(command_names)
+ yield f" local commands={' '.join(command_names)!r}\n"
yield "\n"
# Command aliases
- yield " local aliases='%s'\n" % " ".join(aliases.keys())
+ yield f" local aliases={' '.join(aliases.keys())!r}\n"
for alias, cmd in aliases.items():
- yield " local alias__{}={}\n".format(alias.replace("-", "_"), cmd)
+ yield f" local alias__{alias.replace('-', '_')}={cmd}\n"
yield "\n"
# Fields
- yield " fields='%s'\n" % " ".join(
- set(
- list(library.Item._fields.keys())
- + list(library.Album._fields.keys())
- )
- )
+ fields = library.Item._fields.keys() | library.Album._fields.keys()
+ yield f" fields={' '.join(fields)!r}\n"
# Command options
for cmd, opts in options.items():
for option_type, option_list in opts.items():
if option_list:
option_list = " ".join(option_list)
- yield " local {}__{}='{}'\n".format(
- option_type, cmd.replace("-", "_"), option_list
+ yield (
+ " local"
+ f" {option_type}__{cmd.replace('-', '_')}='{option_list}'\n"
)
yield " _beet_dispatch\n"
diff --git a/beets/util/__init__.py b/beets/util/__init__.py
index e2f7f46bd..fc05e4997 100644
--- a/beets/util/__init__.py
+++ b/beets/util/__init__.py
@@ -47,6 +47,7 @@ from typing import (
NamedTuple,
TypeVar,
Union,
+ cast,
)
from unidecode import unidecode
@@ -112,7 +113,7 @@ class HumanReadableError(Exception):
elif hasattr(self.reason, "strerror"): # i.e., EnvironmentError
return self.reason.strerror
else:
- return '"{}"'.format(str(self.reason))
+ return f'"{self.reason}"'
def get_message(self):
"""Create the human-readable description of the error, sans
@@ -126,7 +127,7 @@ class HumanReadableError(Exception):
"""
if self.tb:
logger.debug(self.tb)
- logger.error("{0}: {1}", self.error_kind, self.args[0])
+ logger.error("{0.error_kind}: {0.args[0]}", self)
class FilesystemError(HumanReadableError):
@@ -142,18 +143,16 @@ class FilesystemError(HumanReadableError):
def get_message(self):
# Use a nicer English phrasing for some specific verbs.
if self.verb in ("move", "copy", "rename"):
- clause = "while {} {} to {}".format(
- self._gerund(),
- displayable_path(self.paths[0]),
- displayable_path(self.paths[1]),
+ clause = (
+ f"while {self._gerund()} {displayable_path(self.paths[0])} to"
+ f" {displayable_path(self.paths[1])}"
)
elif self.verb in ("delete", "write", "create", "read"):
- clause = "while {} {}".format(
- self._gerund(), displayable_path(self.paths[0])
- )
+ clause = f"while {self._gerund()} {displayable_path(self.paths[0])}"
else:
- clause = "during {} of paths {}".format(
- self.verb, ", ".join(displayable_path(p) for p in self.paths)
+ clause = (
+ f"during {self.verb} of paths"
+ f" {', '.join(displayable_path(p) for p in self.paths)}"
)
return f"{self._reasonstr()} {clause}"
@@ -223,12 +222,12 @@ def sorted_walk(
# Get all the directories and files at this level.
try:
contents = os.listdir(syspath(bytes_path))
- except OSError as exc:
+ except OSError:
if logger:
logger.warning(
- "could not list directory {}: {}".format(
- displayable_path(bytes_path), exc.strerror
- )
+ "could not list directory {}",
+ displayable_path(bytes_path),
+ exc_info=True,
)
return
dirs = []
@@ -436,8 +435,8 @@ def syspath(path: PathLike, prefix: bool = True) -> str:
if prefix and not str_path.startswith(WINDOWS_MAGIC_PREFIX):
if str_path.startswith("\\\\"):
# UNC path. Final path should look like \\?\UNC\...
- str_path = "UNC" + str_path[1:]
- str_path = WINDOWS_MAGIC_PREFIX + str_path
+ str_path = f"UNC{str_path[1:]}"
+ str_path = f"{WINDOWS_MAGIC_PREFIX}{str_path}"
return str_path
@@ -509,8 +508,8 @@ def move(path: bytes, dest: bytes, replace: bool = False):
basename = os.path.basename(bytestring_path(dest))
dirname = os.path.dirname(bytestring_path(dest))
tmp = tempfile.NamedTemporaryFile(
- suffix=syspath(b".beets", prefix=False),
- prefix=syspath(b"." + basename + b".", prefix=False),
+ suffix=".beets",
+ prefix=f".{os.fsdecode(basename)}.",
dir=syspath(dirname),
delete=False,
)
@@ -719,7 +718,7 @@ def truncate_path(str_path: str) -> str:
path = Path(str_path)
parent_parts = [truncate_str(p, max_length) for p in path.parts[:-1]]
stem = truncate_str(path.stem, max_length - len(path.suffix))
- return str(Path(*parent_parts, stem)) + path.suffix
+ return f"{Path(*parent_parts, stem)}{path.suffix}"
def _legalize_stage(
@@ -838,9 +837,10 @@ def get_most_common_tags(
"country",
"media",
"albumdisambig",
+ "data_source",
]
for field in fields:
- values = [item[field] for item in items if item]
+ values = [item.get(field) for item in items if item]
likelies[field], freq = plurality(values)
consensus[field] = freq == len(values)
@@ -1053,7 +1053,7 @@ def par_map(transform: Callable[[T], Any], items: Sequence[T]) -> None:
pool.join()
-class cached_classproperty:
+class cached_classproperty(Generic[T]):
"""Descriptor implementing cached class properties.
Provides class-level dynamic property behavior where the getter function is
@@ -1061,9 +1061,9 @@ class cached_classproperty:
instance properties, this operates on the class rather than instances.
"""
- cache: ClassVar[dict[tuple[Any, str], Any]] = {}
+ cache: ClassVar[dict[tuple[type[object], str], object]] = {}
- name: str
+ name: str = ""
# Ideally, we would like to use `Callable[[type[T]], Any]` here,
# however, `mypy` is unable to see this as a **class** property, and thinks
@@ -1079,21 +1079,21 @@ class cached_classproperty:
# "Callable[[Album], ...]"; expected "Callable[[type[Album]], ...]"
#
# Therefore, we just use `Any` here, which is not ideal, but works.
- def __init__(self, getter: Callable[[Any], Any]) -> None:
+ def __init__(self, getter: Callable[..., T]) -> None:
"""Initialize the descriptor with the property getter function."""
- self.getter = getter
+ self.getter: Callable[..., T] = getter
- def __set_name__(self, owner: Any, name: str) -> None:
+ def __set_name__(self, owner: object, name: str) -> None:
"""Capture the attribute name this descriptor is assigned to."""
self.name = name
- def __get__(self, instance: Any, owner: type[Any]) -> Any:
+ def __get__(self, instance: object, owner: type[object]) -> T:
"""Compute and cache if needed, and return the property value."""
- key = owner, self.name
+ key: tuple[type[object], str] = owner, self.name
if key not in self.cache:
self.cache[key] = self.getter(owner)
- return self.cache[key]
+ return cast(T, self.cache[key])
class LazySharedInstance(Generic[T]):
diff --git a/beets/util/artresizer.py b/beets/util/artresizer.py
index fe67c506e..5ecde5140 100644
--- a/beets/util/artresizer.py
+++ b/beets/util/artresizer.py
@@ -54,7 +54,7 @@ def resize_url(url: str, maxwidth: int, quality: int = 0) -> str:
if quality > 0:
params["q"] = quality
- return "{}?{}".format(PROXY_URL, urlencode(params))
+ return f"{PROXY_URL}?{urlencode(params)}"
class LocalBackendNotAvailableError(Exception):
@@ -255,7 +255,7 @@ class IMBackend(LocalBackend):
path_out = get_temp_filename(__name__, "resize_IM_", path_in)
log.debug(
- "artresizer: ImageMagick resizing {0} to {1}",
+ "artresizer: ImageMagick resizing {} to {}",
displayable_path(path_in),
displayable_path(path_out),
)
@@ -287,7 +287,7 @@ class IMBackend(LocalBackend):
util.command_output(cmd)
except subprocess.CalledProcessError:
log.warning(
- "artresizer: IM convert failed for {0}",
+ "artresizer: IM convert failed for {}",
displayable_path(path_in),
)
return path_in
@@ -306,9 +306,9 @@ class IMBackend(LocalBackend):
except subprocess.CalledProcessError as exc:
log.warning("ImageMagick size query failed")
log.debug(
- "`convert` exited with (status {}) when "
+ "`convert` exited with (status {.returncode}) when "
"getting size with command {}:\n{}",
- exc.returncode,
+ exc,
cmd,
exc.output.strip(),
)
@@ -441,8 +441,8 @@ class IMBackend(LocalBackend):
convert_proc.wait()
if convert_proc.returncode:
log.debug(
- "ImageMagick convert failed with status {}: {!r}",
- convert_proc.returncode,
+ "ImageMagick convert failed with status {.returncode}: {!r}",
+ convert_proc,
convert_stderr,
)
return None
@@ -452,7 +452,7 @@ class IMBackend(LocalBackend):
if compare_proc.returncode:
if compare_proc.returncode != 1:
log.debug(
- "ImageMagick compare failed: {0}, {1}",
+ "ImageMagick compare failed: {}, {}",
displayable_path(im2),
displayable_path(im1),
)
@@ -472,7 +472,7 @@ class IMBackend(LocalBackend):
log.debug("IM output is not a number: {0!r}", out_str)
return None
- log.debug("ImageMagick compare score: {0}", phash_diff)
+ log.debug("ImageMagick compare score: {}", phash_diff)
return phash_diff <= compare_threshold
@property
@@ -523,7 +523,7 @@ class PILBackend(LocalBackend):
from PIL import Image
log.debug(
- "artresizer: PIL resizing {0} to {1}",
+ "artresizer: PIL resizing {} to {}",
displayable_path(path_in),
displayable_path(path_out),
)
@@ -552,7 +552,7 @@ class PILBackend(LocalBackend):
for i in range(5):
# 5 attempts is an arbitrary choice
filesize = os.stat(syspath(path_out)).st_size
- log.debug("PIL Pass {0} : Output size: {1}B", i, filesize)
+ log.debug("PIL Pass {} : Output size: {}B", i, filesize)
if filesize <= max_filesize:
return path_out
# The relationship between filesize & quality will be
@@ -569,7 +569,7 @@ class PILBackend(LocalBackend):
progressive=False,
)
log.warning(
- "PIL Failed to resize file to below {0}B", max_filesize
+ "PIL Failed to resize file to below {}B", max_filesize
)
return path_out
@@ -577,7 +577,7 @@ class PILBackend(LocalBackend):
return path_out
except OSError:
log.error(
- "PIL cannot create thumbnail for '{0}'",
+ "PIL cannot create thumbnail for '{}'",
displayable_path(path_in),
)
return path_in
@@ -696,7 +696,7 @@ class ArtResizer:
for backend_cls in BACKEND_CLASSES:
try:
self.local_method = backend_cls()
- log.debug(f"artresizer: method is {self.local_method.NAME}")
+ log.debug("artresizer: method is {.local_method.NAME}", self)
break
except LocalBackendNotAvailableError:
continue
diff --git a/beets/util/bluelet.py b/beets/util/bluelet.py
index b81b389e0..3f3a88b1e 100644
--- a/beets/util/bluelet.py
+++ b/beets/util/bluelet.py
@@ -559,7 +559,7 @@ def spawn(coro):
and child coroutines run concurrently.
"""
if not isinstance(coro, types.GeneratorType):
- raise ValueError("%s is not a coroutine" % coro)
+ raise ValueError(f"{coro} is not a coroutine")
return SpawnEvent(coro)
@@ -569,7 +569,7 @@ def call(coro):
returns a value using end(), then this event returns that value.
"""
if not isinstance(coro, types.GeneratorType):
- raise ValueError("%s is not a coroutine" % coro)
+ raise ValueError(f"{coro} is not a coroutine")
return DelegationEvent(coro)
diff --git a/beets/util/functemplate.py b/beets/util/functemplate.py
index b0daefac2..5d85530a1 100644
--- a/beets/util/functemplate.py
+++ b/beets/util/functemplate.py
@@ -136,7 +136,7 @@ class Symbol:
self.original = original
def __repr__(self):
- return "Symbol(%s)" % repr(self.ident)
+ return f"Symbol({self.ident!r})"
def evaluate(self, env):
"""Evaluate the symbol in the environment, returning a Unicode
@@ -152,7 +152,7 @@ class Symbol:
def translate(self):
"""Compile the variable lookup."""
ident = self.ident
- expr = ex_rvalue(VARIABLE_PREFIX + ident)
+ expr = ex_rvalue(f"{VARIABLE_PREFIX}{ident}")
return [expr], {ident}, set()
@@ -165,9 +165,7 @@ class Call:
self.original = original
def __repr__(self):
- return "Call({}, {}, {})".format(
- repr(self.ident), repr(self.args), repr(self.original)
- )
+ return f"Call({self.ident!r}, {self.args!r}, {self.original!r})"
def evaluate(self, env):
"""Evaluate the function call in the environment, returning a
@@ -180,7 +178,7 @@ class Call:
except Exception as exc:
# Function raised exception! Maybe inlining the name of
# the exception will help debug.
- return "<%s>" % str(exc)
+ return f"<{exc}>"
return str(out)
else:
return self.original
@@ -213,7 +211,7 @@ class Call:
)
)
- subexpr_call = ex_call(FUNCTION_PREFIX + self.ident, arg_exprs)
+ subexpr_call = ex_call(f"{FUNCTION_PREFIX}{self.ident}", arg_exprs)
return [subexpr_call], varnames, funcnames
@@ -226,7 +224,7 @@ class Expression:
self.parts = parts
def __repr__(self):
- return "Expression(%s)" % (repr(self.parts))
+ return f"Expression({self.parts!r})"
def evaluate(self, env):
"""Evaluate the entire expression in the environment, returning
@@ -298,9 +296,6 @@ class Parser:
GROUP_CLOSE,
ESCAPE_CHAR,
)
- special_char_re = re.compile(
- r"[%s]|\Z" % "".join(re.escape(c) for c in special_chars)
- )
escapable_chars = (SYMBOL_DELIM, FUNC_DELIM, GROUP_CLOSE, ARG_SEP)
terminator_chars = (GROUP_CLOSE,)
@@ -312,24 +307,18 @@ class Parser:
"""
# Append comma (ARG_SEP) to the list of special characters only when
# parsing function arguments.
- extra_special_chars = ()
- special_char_re = self.special_char_re
- if self.in_argument:
- extra_special_chars = (ARG_SEP,)
- special_char_re = re.compile(
- r"[%s]|\Z"
- % "".join(
- re.escape(c)
- for c in self.special_chars + extra_special_chars
- )
- )
+ extra_special_chars = (ARG_SEP,) if self.in_argument else ()
+ special_chars = (*self.special_chars, *extra_special_chars)
+ special_char_re = re.compile(
+ rf"[{''.join(map(re.escape, special_chars))}]|\Z"
+ )
text_parts = []
while self.pos < len(self.string):
char = self.string[self.pos]
- if char not in self.special_chars + extra_special_chars:
+ if char not in special_chars:
# A non-special character. Skip to the next special
# character, treating the interstice as literal text.
next_pos = (
@@ -566,9 +555,9 @@ class Template:
argnames = []
for varname in varnames:
- argnames.append(VARIABLE_PREFIX + varname)
+ argnames.append(f"{VARIABLE_PREFIX}{varname}")
for funcname in funcnames:
- argnames.append(FUNCTION_PREFIX + funcname)
+ argnames.append(f"{FUNCTION_PREFIX}{funcname}")
func = compile_func(
argnames,
@@ -578,9 +567,9 @@ class Template:
def wrapper_func(values={}, functions={}):
args = {}
for varname in varnames:
- args[VARIABLE_PREFIX + varname] = values[varname]
+ args[f"{VARIABLE_PREFIX}{varname}"] = values[varname]
for funcname in funcnames:
- args[FUNCTION_PREFIX + funcname] = functions[funcname]
+ args[f"{FUNCTION_PREFIX}{funcname}"] = functions[funcname]
parts = func(**args)
return "".join(parts)
diff --git a/beets/util/id_extractors.py b/beets/util/id_extractors.py
index 6cdb787d1..f66f1690f 100644
--- a/beets/util/id_extractors.py
+++ b/beets/util/id_extractors.py
@@ -58,7 +58,8 @@ def extract_release_id(source: str, id_: str) -> str | None:
source_pattern = PATTERN_BY_SOURCE[source.lower()]
except KeyError:
log.debug(
- f"Unknown source '{source}' for ID extraction. Returning id/url as-is."
+ "Unknown source '{}' for ID extraction. Returning id/url as-is.",
+ source,
)
return id_
diff --git a/beets/util/units.py b/beets/util/units.py
index d07d42546..f5fcb743b 100644
--- a/beets/util/units.py
+++ b/beets/util/units.py
@@ -19,7 +19,7 @@ def human_seconds_short(interval):
string.
"""
interval = int(interval)
- return "%i:%02i" % (interval // 60, interval % 60)
+ return f"{interval // 60}:{interval % 60:02d}"
def human_bytes(size):
diff --git a/beetsplug/_utils/__init__.py b/beetsplug/_utils/__init__.py
new file mode 100644
index 000000000..7453f88bf
--- /dev/null
+++ b/beetsplug/_utils/__init__.py
@@ -0,0 +1,3 @@
+from . import art, vfs
+
+__all__ = ["art", "vfs"]
diff --git a/beets/art.py b/beetsplug/_utils/art.py
similarity index 84%
rename from beets/art.py
rename to beetsplug/_utils/art.py
index 2ff58c309..656c303ce 100644
--- a/beets/art.py
+++ b/beetsplug/_utils/art.py
@@ -38,11 +38,7 @@ def get_art(log, item):
try:
mf = mediafile.MediaFile(syspath(item.path))
except mediafile.UnreadableFileError as exc:
- log.warning(
- "Could not extract art from {0}: {1}",
- displayable_path(item.path),
- exc,
- )
+ log.warning("Could not extract art from {.filepath}: {}", item, exc)
return
return mf.art
@@ -83,16 +79,16 @@ def embed_item(
# Get the `Image` object from the file.
try:
- log.debug("embedding {0}", displayable_path(imagepath))
+ log.debug("embedding {}", displayable_path(imagepath))
image = mediafile_image(imagepath, maxwidth)
except OSError as exc:
- log.warning("could not read image file: {0}", exc)
+ log.warning("could not read image file: {}", exc)
return
# Make sure the image kind is safe (some formats only support PNG
# and JPEG).
if image.mime_type not in ("image/jpeg", "image/png"):
- log.info("not embedding image of unsupported type: {}", image.mime_type)
+ log.info("not embedding image of unsupported type: {.mime_type}", image)
return
item.try_write(path=itempath, tags={"images": [image]}, id3v23=id3v23)
@@ -110,11 +106,11 @@ def embed_album(
"""Embed album art into all of the album's items."""
imagepath = album.artpath
if not imagepath:
- log.info("No album art present for {0}", album)
+ log.info("No album art present for {}", album)
return
if not os.path.isfile(syspath(imagepath)):
log.info(
- "Album art not found at {0} for {1}",
+ "Album art not found at {} for {}",
displayable_path(imagepath),
album,
)
@@ -122,7 +118,7 @@ def embed_album(
if maxwidth:
imagepath = resize_image(log, imagepath, maxwidth, quality)
- log.info("Embedding album art into {0}", album)
+ log.info("Embedding album art into {}", album)
for item in album.items():
embed_item(
@@ -143,8 +139,7 @@ def resize_image(log, imagepath, maxwidth, quality):
specified quality level.
"""
log.debug(
- "Resizing album art to {0} pixels wide and encoding at quality \
- level {1}",
+ "Resizing album art to {} pixels wide and encoding at quality level {}",
maxwidth,
quality,
)
@@ -184,18 +179,18 @@ def extract(log, outpath, item):
art = get_art(log, item)
outpath = bytestring_path(outpath)
if not art:
- log.info("No album art present in {0}, skipping.", item)
+ log.info("No album art present in {}, skipping.", item)
return
# Add an extension to the filename.
ext = mediafile.image_extension(art)
if not ext:
- log.warning("Unknown image type in {0}.", displayable_path(item.path))
+ log.warning("Unknown image type in {.filepath}.", item)
return
- outpath += bytestring_path("." + ext)
+ outpath += bytestring_path(f".{ext}")
log.info(
- "Extracting album art from: {0} to: {1}",
+ "Extracting album art from: {} to: {}",
item,
displayable_path(outpath),
)
@@ -213,7 +208,7 @@ def extract_first(log, outpath, items):
def clear(log, lib, query):
items = lib.items(query)
- log.info("Clearing album art from {0} items", len(items))
+ log.info("Clearing album art from {} items", len(items))
for item in items:
- log.debug("Clearing art for {0}", item)
+ log.debug("Clearing art for {}", item)
item.try_write(tags={"images": None})
diff --git a/beets/vfs.py b/beetsplug/_utils/vfs.py
similarity index 82%
rename from beets/vfs.py
rename to beetsplug/_utils/vfs.py
index 4fd133f5a..6294b644c 100644
--- a/beets/vfs.py
+++ b/beetsplug/_utils/vfs.py
@@ -16,17 +16,25 @@
libraries.
"""
-from typing import Any, NamedTuple
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, NamedTuple
from beets import util
+if TYPE_CHECKING:
+ from beets.library import Library
+
class Node(NamedTuple):
- files: dict[str, Any]
- dirs: dict[str, Any]
+ files: dict[str, int]
+ # Maps filenames to Item ids.
+
+ dirs: dict[str, Node]
+ # Maps directory names to child nodes.
-def _insert(node, path, itemid):
+def _insert(node: Node, path: list[str], itemid: int):
"""Insert an item into a virtual filesystem node."""
if len(path) == 1:
# Last component. Insert file.
@@ -40,7 +48,7 @@ def _insert(node, path, itemid):
_insert(node.dirs[dirname], rest, itemid)
-def libtree(lib):
+def libtree(lib: Library) -> Node:
"""Generates a filesystem-like directory tree for the files
contained in `lib`. Filesystem nodes are (files, dirs) named
tuples in which both components are dictionaries. The first
diff --git a/beetsplug/absubmit.py b/beetsplug/absubmit.py
index c02a1c923..62a248482 100644
--- a/beetsplug/absubmit.py
+++ b/beetsplug/absubmit.py
@@ -42,9 +42,7 @@ def call(args):
try:
return util.command_output(args).stdout
except subprocess.CalledProcessError as e:
- raise ABSubmitError(
- "{} exited with status {}".format(args[0], e.returncode)
- )
+ raise ABSubmitError(f"{args[0]} exited with status {e.returncode}")
class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin):
@@ -63,9 +61,7 @@ class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin):
# Explicit path to extractor
if not os.path.isfile(self.extractor):
raise ui.UserError(
- "Extractor command does not exist: {0}.".format(
- self.extractor
- )
+ f"Extractor command does not exist: {self.extractor}."
)
else:
# Implicit path to extractor, search for it in path
@@ -101,8 +97,8 @@ class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin):
"with an HTTP scheme"
)
elif base_url[-1] != "/":
- base_url = base_url + "/"
- self.url = base_url + "{mbid}/low-level"
+ base_url = f"{base_url}/"
+ self.url = f"{base_url}{{mbid}}/low-level"
def commands(self):
cmd = ui.Subcommand(
@@ -122,8 +118,10 @@ class AcousticBrainzSubmitPlugin(plugins.BeetsPlugin):
dest="pretend_fetch",
action="store_true",
default=False,
- help="pretend to perform action, but show \
-only files which would be processed",
+ help=(
+ "pretend to perform action, but show only files which would be"
+ " processed"
+ ),
)
cmd.func = self.command
return [cmd]
diff --git a/beetsplug/acousticbrainz.py b/beetsplug/acousticbrainz.py
index 56ac0f6c5..92a1976a1 100644
--- a/beetsplug/acousticbrainz.py
+++ b/beetsplug/acousticbrainz.py
@@ -97,7 +97,7 @@ class AcousticPlugin(plugins.BeetsPlugin):
"with an HTTP scheme"
)
elif self.base_url[-1] != "/":
- self.base_url = self.base_url + "/"
+ self.base_url = f"{self.base_url}/"
if self.config["auto"]:
self.register_listener("import_task_files", self.import_task_files)
@@ -153,7 +153,7 @@ class AcousticPlugin(plugins.BeetsPlugin):
try:
data.update(res.json())
except ValueError:
- self._log.debug("Invalid Response: {}", res.text)
+ self._log.debug("Invalid Response: {.text}", res)
return {}
return data
@@ -300,4 +300,4 @@ class AcousticPlugin(plugins.BeetsPlugin):
def _generate_urls(base_url, mbid):
"""Generates AcousticBrainz end point urls for given `mbid`."""
for level in LEVELS:
- yield base_url + mbid + level
+ yield f"{base_url}{mbid}{level}"
diff --git a/beetsplug/aura.py b/beetsplug/aura.py
index 53458d7ee..7b75f31e5 100644
--- a/beetsplug/aura.py
+++ b/beetsplug/aura.py
@@ -236,14 +236,14 @@ class AURADocument:
# Not the last page so work out links.next url
if not self.args:
# No existing arguments, so current page is 0
- next_url = request.url + "?page=1"
+ next_url = f"{request.url}?page=1"
elif not self.args.get("page", None):
# No existing page argument, so add one to the end
- next_url = request.url + "&page=1"
+ next_url = f"{request.url}&page=1"
else:
# Increment page token by 1
next_url = request.url.replace(
- f"page={page}", "page={}".format(page + 1)
+ f"page={page}", f"page={page + 1}"
)
# Get only the items in the page range
data = [
@@ -427,9 +427,7 @@ class TrackDocument(AURADocument):
return self.error(
"404 Not Found",
"No track with the requested id.",
- "There is no track with an id of {} in the library.".format(
- track_id
- ),
+ f"There is no track with an id of {track_id} in the library.",
)
return self.single_resource_document(
self.get_resource_object(self.lib, track)
@@ -513,9 +511,7 @@ class AlbumDocument(AURADocument):
return self.error(
"404 Not Found",
"No album with the requested id.",
- "There is no album with an id of {} in the library.".format(
- album_id
- ),
+ f"There is no album with an id of {album_id} in the library.",
)
return self.single_resource_document(
self.get_resource_object(self.lib, album)
@@ -600,9 +596,7 @@ class ArtistDocument(AURADocument):
return self.error(
"404 Not Found",
"No artist with the requested id.",
- "There is no artist with an id of {} in the library.".format(
- artist_id
- ),
+ f"There is no artist with an id of {artist_id} in the library.",
)
return self.single_resource_document(artist_resource)
@@ -703,7 +697,7 @@ class ImageDocument(AURADocument):
relationships = {}
# Split id into [parent_type, parent_id, filename]
id_split = image_id.split("-")
- relationships[id_split[0] + "s"] = {
+ relationships[f"{id_split[0]}s"] = {
"data": [{"type": id_split[0], "id": id_split[1]}]
}
@@ -727,9 +721,7 @@ class ImageDocument(AURADocument):
return self.error(
"404 Not Found",
"No image with the requested id.",
- "There is no image with an id of {} in the library.".format(
- image_id
- ),
+ f"There is no image with an id of {image_id} in the library.",
)
return self.single_resource_document(image_resource)
@@ -775,9 +767,7 @@ def audio_file(track_id):
return AURADocument.error(
"404 Not Found",
"No track with the requested id.",
- "There is no track with an id of {} in the library.".format(
- track_id
- ),
+ f"There is no track with an id of {track_id} in the library.",
)
path = os.fsdecode(track.path)
@@ -785,9 +775,8 @@ def audio_file(track_id):
return AURADocument.error(
"404 Not Found",
"No audio file for the requested track.",
- (
- "There is no audio file for track {} at the expected location"
- ).format(track_id),
+ f"There is no audio file for track {track_id} at the expected"
+ " location",
)
file_mimetype = guess_type(path)[0]
@@ -795,10 +784,8 @@ def audio_file(track_id):
return AURADocument.error(
"500 Internal Server Error",
"Requested audio file has an unknown mimetype.",
- (
- "The audio file for track {} has an unknown mimetype. "
- "Its file extension is {}."
- ).format(track_id, path.split(".")[-1]),
+ f"The audio file for track {track_id} has an unknown mimetype. "
+ f"Its file extension is {path.split('.')[-1]}.",
)
# Check that the Accept header contains the file's mimetype
@@ -810,10 +797,8 @@ def audio_file(track_id):
return AURADocument.error(
"406 Not Acceptable",
"Unsupported MIME type or bitrate parameter in Accept header.",
- (
- "The audio file for track {} is only available as {} and "
- "bitrate parameters are not supported."
- ).format(track_id, file_mimetype),
+ f"The audio file for track {track_id} is only available as"
+ f" {file_mimetype} and bitrate parameters are not supported.",
)
return send_file(
@@ -896,9 +881,7 @@ def image_file(image_id):
return AURADocument.error(
"404 Not Found",
"No image with the requested id.",
- "There is no image with an id of {} in the library".format(
- image_id
- ),
+ f"There is no image with an id of {image_id} in the library",
)
return send_file(img_path)
diff --git a/beetsplug/badfiles.py b/beetsplug/badfiles.py
index 0511d960d..070008be8 100644
--- a/beetsplug/badfiles.py
+++ b/beetsplug/badfiles.py
@@ -110,9 +110,7 @@ class BadFiles(BeetsPlugin):
self._log.debug("checking path: {}", dpath)
if not os.path.exists(item.path):
ui.print_(
- "{}: file does not exist".format(
- ui.colorize("text_error", dpath)
- )
+ f"{ui.colorize('text_error', dpath)}: file does not exist"
)
# Run the checker against the file if one is found
@@ -129,37 +127,32 @@ class BadFiles(BeetsPlugin):
except CheckerCommandError as e:
if e.errno == errno.ENOENT:
self._log.error(
- "command not found: {} when validating file: {}",
- e.checker,
- e.path,
+ "command not found: {0.checker} when validating file: {0.path}",
+ e,
)
else:
- self._log.error("error invoking {}: {}", e.checker, e.msg)
+ self._log.error("error invoking {0.checker}: {0.msg}", e)
return []
error_lines = []
if status > 0:
error_lines.append(
- "{}: checker exited with status {}".format(
- ui.colorize("text_error", dpath), status
- )
+ f"{ui.colorize('text_error', dpath)}: checker exited with"
+ f" status {status}"
)
for line in output:
error_lines.append(f" {line}")
elif errors > 0:
error_lines.append(
- "{}: checker found {} errors or warnings".format(
- ui.colorize("text_warning", dpath), errors
- )
+ f"{ui.colorize('text_warning', dpath)}: checker found"
+                f" {errors} errors or warnings"
)
for line in output:
error_lines.append(f" {line}")
elif self.verbose:
- error_lines.append(
- "{}: ok".format(ui.colorize("text_success", dpath))
- )
+ error_lines.append(f"{ui.colorize('text_success', dpath)}: ok")
return error_lines
@@ -180,9 +173,8 @@ class BadFiles(BeetsPlugin):
def on_import_task_before_choice(self, task, session):
if hasattr(task, "_badfiles_checks_failed"):
ui.print_(
- "{} one or more files failed checks:".format(
- ui.colorize("text_warning", "BAD")
- )
+ f"{ui.colorize('text_warning', 'BAD')} one or more files failed"
+ " checks:"
)
for error in task._badfiles_checks_failed:
for error_line in error:
diff --git a/beetsplug/beatport.py b/beetsplug/beatport.py
index 16e0dc896..c07cce72f 100644
--- a/beetsplug/beatport.py
+++ b/beetsplug/beatport.py
@@ -110,7 +110,7 @@ class BeatportClient:
:returns: OAuth resource owner key and secret as unicode
"""
self.api.parse_authorization_response(
- "https://beets.io/auth?" + auth_data
+ f"https://beets.io/auth?{auth_data}"
)
access_data = self.api.fetch_access_token(
self._make_url("/identity/1/oauth/access-token")
@@ -200,8 +200,8 @@ class BeatportClient:
def _make_url(self, endpoint: str) -> str:
"""Get complete URL for a given API endpoint."""
if not endpoint.startswith("/"):
- endpoint = "/" + endpoint
- return self._api_base + endpoint
+ endpoint = f"/{endpoint}"
+ return f"{self._api_base}{endpoint}"
def _get(self, endpoint: str, **kwargs) -> list[JSONDict]:
"""Perform a GET request on a given API endpoint.
@@ -212,14 +212,10 @@ class BeatportClient:
try:
response = self.api.get(self._make_url(endpoint), params=kwargs)
except Exception as e:
- raise BeatportAPIError(
- "Error connecting to Beatport API: {}".format(e)
- )
+ raise BeatportAPIError(f"Error connecting to Beatport API: {e}")
if not response:
raise BeatportAPIError(
- "Error {0.status_code} for '{0.request.path_url}".format(
- response
- )
+                f"Error {response.status_code} for '{response.request.path_url}'"
)
return response.json()["results"]
@@ -275,15 +271,14 @@ class BeatportRelease(BeatportObject):
self.genre = data.get("genre")
if "slug" in data:
- self.url = "https://beatport.com/release/{}/{}".format(
- data["slug"], data["id"]
+ self.url = (
+ f"https://beatport.com/release/{data['slug']}/{data['id']}"
)
def __str__(self) -> str:
-        return "<BeatportRelease: {} - {} ({})>".format(
-            self.artists_str(),
-            self.name,
-            self.catalog_number,
+        return (
+            f"<BeatportRelease: {self.artists_str()} - {self.name}"
+            f" ({self.catalog_number})>"
         )
@@ -311,9 +306,7 @@ class BeatportTrack(BeatportObject):
except ValueError:
pass
if "slug" in data:
- self.url = "https://beatport.com/track/{}/{}".format(
- data["slug"], data["id"]
- )
+ self.url = f"https://beatport.com/track/{data['slug']}/{data['id']}"
self.track_number = data.get("trackNumber")
self.bpm = data.get("bpm")
self.initial_key = str((data.get("key") or {}).get("shortName"))
@@ -335,7 +328,6 @@ class BeatportPlugin(MetadataSourcePlugin):
"apikey": "57713c3906af6f5def151b33601389176b37b429",
"apisecret": "b3fe08c93c80aefd749fe871a16cd2bb32e2b954",
"tokenfile": "beatport_token.json",
- "source_weight": 0.5,
}
)
self.config["apikey"].redact = True
@@ -373,7 +365,7 @@ class BeatportPlugin(MetadataSourcePlugin):
try:
url = auth_client.get_authorize_url()
except AUTH_ERRORS as e:
- self._log.debug("authentication error: {0}", e)
+ self._log.debug("authentication error: {}", e)
raise beets.ui.UserError("communication with Beatport failed")
beets.ui.print_("To authenticate with Beatport, visit:")
@@ -384,11 +376,11 @@ class BeatportPlugin(MetadataSourcePlugin):
try:
token, secret = auth_client.get_access_token(data)
except AUTH_ERRORS as e:
- self._log.debug("authentication error: {0}", e)
+ self._log.debug("authentication error: {}", e)
raise beets.ui.UserError("Beatport token request failed")
# Save the token for later use.
- self._log.debug("Beatport token {0}, secret {1}", token, secret)
+ self._log.debug("Beatport token {}, secret {}", token, secret)
with open(self._tokenfile(), "w") as f:
json.dump({"token": token, "secret": secret}, f)
@@ -412,7 +404,7 @@ class BeatportPlugin(MetadataSourcePlugin):
try:
yield from self._get_releases(query)
except BeatportAPIError as e:
- self._log.debug("API Error: {0} (query: {1})", e, query)
+ self._log.debug("API Error: {} (query: {})", e, query)
return
def item_candidates(
@@ -422,14 +414,14 @@ class BeatportPlugin(MetadataSourcePlugin):
try:
return self._get_tracks(query)
except BeatportAPIError as e:
- self._log.debug("API Error: {0} (query: {1})", e, query)
+ self._log.debug("API Error: {} (query: {})", e, query)
return []
def album_for_id(self, album_id: str):
"""Fetches a release by its Beatport ID and returns an AlbumInfo object
or None if the query is not a valid ID or release is not found.
"""
- self._log.debug("Searching for release {0}", album_id)
+ self._log.debug("Searching for release {}", album_id)
if not (release_id := self._extract_id(album_id)):
self._log.debug("Not a valid Beatport release ID.")
@@ -444,7 +436,7 @@ class BeatportPlugin(MetadataSourcePlugin):
"""Fetches a track by its Beatport ID and returns a TrackInfo object
or None if the track is not a valid Beatport ID or track is not found.
"""
- self._log.debug("Searching for track {0}", track_id)
+ self._log.debug("Searching for track {}", track_id)
# TODO: move to extractor
match = re.search(r"(^|beatport\.com/track/.+/)(\d+)$", track_id)
if not match:
diff --git a/beetsplug/bench.py b/beetsplug/bench.py
index cf72527e8..d77f1f92a 100644
--- a/beetsplug/bench.py
+++ b/beetsplug/bench.py
@@ -17,10 +17,11 @@
import cProfile
import timeit
-from beets import importer, library, plugins, ui, vfs
+from beets import importer, library, plugins, ui
from beets.autotag import match
from beets.plugins import BeetsPlugin
from beets.util.functemplate import Template
+from beetsplug._utils import vfs
def aunique_benchmark(lib, prof):
diff --git a/beetsplug/bpd/__init__.py b/beetsplug/bpd/__init__.py
index a2ad2835c..1a4f505dd 100644
--- a/beetsplug/bpd/__init__.py
+++ b/beetsplug/bpd/__init__.py
@@ -30,10 +30,11 @@ from typing import TYPE_CHECKING
import beets
import beets.ui
-from beets import dbcore, logging, vfs
+from beets import dbcore, logging
from beets.library import Item
from beets.plugins import BeetsPlugin
from beets.util import as_string, bluelet
+from beetsplug._utils import vfs
if TYPE_CHECKING:
from beets.dbcore.query import Query
@@ -52,7 +53,7 @@ except ImportError as e:
PROTOCOL_VERSION = "0.16.0"
BUFSIZE = 1024
-HELLO = "OK MPD %s" % PROTOCOL_VERSION
+HELLO = f"OK MPD {PROTOCOL_VERSION}"
CLIST_BEGIN = "command_list_begin"
CLIST_VERBOSE_BEGIN = "command_list_ok_begin"
CLIST_END = "command_list_end"
@@ -282,7 +283,7 @@ class BaseServer:
if not self.ctrl_sock:
self.ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.ctrl_sock.connect((self.ctrl_host, self.ctrl_port))
- self.ctrl_sock.sendall((message + "\n").encode("utf-8"))
+ self.ctrl_sock.sendall((f"{message}\n").encode("utf-8"))
def _send_event(self, event):
"""Notify subscribed connections of an event."""
@@ -376,13 +377,13 @@ class BaseServer:
if self.password and not conn.authenticated:
# Not authenticated. Show limited list of commands.
for cmd in SAFE_COMMANDS:
- yield "command: " + cmd
+ yield f"command: {cmd}"
else:
# Authenticated. Show all commands.
for func in dir(self):
if func.startswith("cmd_"):
- yield "command: " + func[4:]
+ yield f"command: {func[4:]}"
def cmd_notcommands(self, conn):
"""Lists all unavailable commands."""
@@ -392,7 +393,7 @@ class BaseServer:
if func.startswith("cmd_"):
cmd = func[4:]
if cmd not in SAFE_COMMANDS:
- yield "command: " + cmd
+ yield f"command: {cmd}"
else:
# Authenticated. No commands are unavailable.
@@ -406,22 +407,22 @@ class BaseServer:
playlist, playlistlength, and xfade.
"""
yield (
- "repeat: " + str(int(self.repeat)),
- "random: " + str(int(self.random)),
- "consume: " + str(int(self.consume)),
- "single: " + str(int(self.single)),
- "playlist: " + str(self.playlist_version),
- "playlistlength: " + str(len(self.playlist)),
- "mixrampdb: " + str(self.mixrampdb),
+ f"repeat: {int(self.repeat)}",
+ f"random: {int(self.random)}",
+ f"consume: {int(self.consume)}",
+ f"single: {int(self.single)}",
+ f"playlist: {self.playlist_version}",
+ f"playlistlength: {len(self.playlist)}",
+ f"mixrampdb: {self.mixrampdb}",
)
if self.volume > 0:
- yield "volume: " + str(self.volume)
+ yield f"volume: {self.volume}"
if not math.isnan(self.mixrampdelay):
- yield "mixrampdelay: " + str(self.mixrampdelay)
+ yield f"mixrampdelay: {self.mixrampdelay}"
if self.crossfade > 0:
- yield "xfade: " + str(self.crossfade)
+ yield f"xfade: {self.crossfade}"
if self.current_index == -1:
state = "stop"
@@ -429,20 +430,20 @@ class BaseServer:
state = "pause"
else:
state = "play"
- yield "state: " + state
+ yield f"state: {state}"
if self.current_index != -1: # i.e., paused or playing
current_id = self._item_id(self.playlist[self.current_index])
- yield "song: " + str(self.current_index)
- yield "songid: " + str(current_id)
+ yield f"song: {self.current_index}"
+ yield f"songid: {current_id}"
if len(self.playlist) > self.current_index + 1:
# If there's a next song, report its index too.
next_id = self._item_id(self.playlist[self.current_index + 1])
- yield "nextsong: " + str(self.current_index + 1)
- yield "nextsongid: " + str(next_id)
+ yield f"nextsong: {self.current_index + 1}"
+ yield f"nextsongid: {next_id}"
if self.error:
- yield "error: " + self.error
+ yield f"error: {self.error}"
def cmd_clearerror(self, conn):
"""Removes the persistent error state of the server. This
@@ -522,7 +523,7 @@ class BaseServer:
def cmd_replay_gain_status(self, conn):
"""Get the replaygain mode."""
- yield "replay_gain_mode: " + str(self.replay_gain_mode)
+ yield f"replay_gain_mode: {self.replay_gain_mode}"
def cmd_clear(self, conn):
"""Clear the playlist."""
@@ -643,8 +644,8 @@ class BaseServer:
Also a dummy implementation.
"""
for idx, track in enumerate(self.playlist):
- yield "cpos: " + str(idx)
- yield "Id: " + str(track.id)
+ yield f"cpos: {idx}"
+ yield f"Id: {track.id}"
def cmd_currentsong(self, conn):
"""Sends information about the currently-playing song."""
@@ -759,11 +760,11 @@ class Connection:
"""Create a new connection for the accepted socket `client`."""
self.server = server
self.sock = sock
- self.address = "{}:{}".format(*sock.sock.getpeername())
+ self.address = ":".join(map(str, sock.sock.getpeername()))
def debug(self, message, kind=" "):
"""Log a debug message about this connection."""
- self.server._log.debug("{}[{}]: {}", kind, self.address, message)
+ self.server._log.debug("{}[{.address}]: {}", kind, self, message)
def run(self):
pass
@@ -899,9 +900,7 @@ class MPDConnection(Connection):
return
except BPDIdleError as e:
self.idle_subscriptions = e.subsystems
- self.debug(
- "awaiting: {}".format(" ".join(e.subsystems)), kind="z"
- )
+ self.debug(f"awaiting: {' '.join(e.subsystems)}", kind="z")
yield bluelet.call(self.server.dispatch_events())
@@ -913,7 +912,7 @@ class ControlConnection(Connection):
super().__init__(server, sock)
def debug(self, message, kind=" "):
- self.server._log.debug("CTRL {}[{}]: {}", kind, self.address, message)
+ self.server._log.debug("CTRL {}[{.address}]: {}", kind, self, message)
def run(self):
"""Listen for control commands and delegate to `ctrl_*` methods."""
@@ -933,7 +932,7 @@ class ControlConnection(Connection):
func = command.delegate("ctrl_", self)
yield bluelet.call(func(*command.args))
except (AttributeError, TypeError) as e:
- yield self.send("ERROR: {}".format(e.args[0]))
+ yield self.send(f"ERROR: {e.args[0]}")
except Exception:
yield self.send(
["ERROR: server error", traceback.format_exc().rstrip()]
@@ -992,7 +991,7 @@ class Command:
of arguments.
"""
# Attempt to get correct command function.
- func_name = prefix + self.name
+ func_name = f"{prefix}{self.name}"
if not hasattr(target, func_name):
raise AttributeError(f'unknown command "{self.name}"')
func = getattr(target, func_name)
@@ -1011,7 +1010,7 @@ class Command:
# If the command accepts a variable number of arguments skip the check.
if wrong_num and not argspec.varargs:
raise TypeError(
- 'wrong number of arguments for "{}"'.format(self.name),
+ f'wrong number of arguments for "{self.name}"',
self.name,
)
@@ -1110,10 +1109,8 @@ class Server(BaseServer):
self.lib = library
self.player = gstplayer.GstPlayer(self.play_finished)
self.cmd_update(None)
- log.info("Server ready and listening on {}:{}".format(host, port))
- log.debug(
- "Listening for control signals on {}:{}".format(host, ctrl_port)
- )
+ log.info("Server ready and listening on {}:{}", host, port)
+ log.debug("Listening for control signals on {}:{}", host, ctrl_port)
def run(self):
self.player.run()
@@ -1128,23 +1125,21 @@ class Server(BaseServer):
def _item_info(self, item):
info_lines = [
- "file: " + as_string(item.destination(relative_to_libdir=True)),
- "Time: " + str(int(item.length)),
- "duration: " + f"{item.length:.3f}",
- "Id: " + str(item.id),
+ f"file: {as_string(item.destination(relative_to_libdir=True))}",
+ f"Time: {int(item.length)}",
+ f"duration: {item.length:.3f}",
+ f"Id: {item.id}",
]
try:
pos = self._id_to_index(item.id)
- info_lines.append("Pos: " + str(pos))
+ info_lines.append(f"Pos: {pos}")
except ArgumentNotFoundError:
# Don't include position if not in playlist.
pass
for tagtype, field in self.tagtype_map.items():
- info_lines.append(
- "{}: {}".format(tagtype, str(getattr(item, field)))
- )
+ info_lines.append(f"{tagtype}: {getattr(item, field)}")
return info_lines
@@ -1207,7 +1202,7 @@ class Server(BaseServer):
def _path_join(self, p1, p2):
"""Smashes together two BPD paths."""
- out = p1 + "/" + p2
+ out = f"{p1}/{p2}"
return out.replace("//", "/").replace("//", "/")
def cmd_lsinfo(self, conn, path="/"):
@@ -1225,7 +1220,7 @@ class Server(BaseServer):
if dirpath.startswith("/"):
# Strip leading slash (libmpc rejects this).
dirpath = dirpath[1:]
- yield "directory: %s" % dirpath
+ yield f"directory: {dirpath}"
def _listall(self, basepath, node, info=False):
"""Helper function for recursive listing. If info, show
@@ -1237,7 +1232,7 @@ class Server(BaseServer):
item = self.lib.get_item(node)
yield self._item_info(item)
else:
- yield "file: " + basepath
+ yield f"file: {basepath}"
else:
# List a directory. Recurse into both directories and files.
for name, itemid in sorted(node.files.items()):
@@ -1246,7 +1241,7 @@ class Server(BaseServer):
yield from self._listall(newpath, itemid, info)
for name, subdir in sorted(node.dirs.items()):
newpath = self._path_join(basepath, name)
- yield "directory: " + newpath
+ yield f"directory: {newpath}"
yield from self._listall(newpath, subdir, info)
def cmd_listall(self, conn, path="/"):
@@ -1280,7 +1275,7 @@ class Server(BaseServer):
for item in self._all_items(self._resolve_path(path)):
self.playlist.append(item)
if send_id:
- yield "Id: " + str(item.id)
+ yield f"Id: {item.id}"
self.playlist_version += 1
self._send_event("playlist")
@@ -1302,20 +1297,13 @@ class Server(BaseServer):
item = self.playlist[self.current_index]
yield (
- "bitrate: " + str(item.bitrate / 1000),
- "audio: {}:{}:{}".format(
- str(item.samplerate),
- str(item.bitdepth),
- str(item.channels),
- ),
+ f"bitrate: {item.bitrate / 1000}",
+ f"audio: {item.samplerate}:{item.bitdepth}:{item.channels}",
)
(pos, total) = self.player.time()
yield (
- "time: {}:{}".format(
- str(int(pos)),
- str(int(total)),
- ),
+ f"time: {int(pos)}:{int(total)}",
"elapsed: " + f"{pos:.3f}",
"duration: " + f"{total:.3f}",
)
@@ -1335,13 +1323,13 @@ class Server(BaseServer):
artists, albums, songs, totaltime = tx.query(statement)[0]
yield (
- "artists: " + str(artists),
- "albums: " + str(albums),
- "songs: " + str(songs),
- "uptime: " + str(int(time.time() - self.startup_time)),
- "playtime: " + "0", # Missing.
- "db_playtime: " + str(int(totaltime)),
- "db_update: " + str(int(self.updated_time)),
+ f"artists: {artists}",
+ f"albums: {albums}",
+ f"songs: {songs}",
+ f"uptime: {int(time.time() - self.startup_time)}",
+ "playtime: 0", # Missing.
+ f"db_playtime: {int(totaltime)}",
+ f"db_update: {int(self.updated_time)}",
)
def cmd_decoders(self, conn):
@@ -1383,7 +1371,7 @@ class Server(BaseServer):
searching.
"""
for tag in self.tagtype_map:
- yield "tagtype: " + tag
+ yield f"tagtype: {tag}"
def _tagtype_lookup(self, tag):
"""Uses `tagtype_map` to look up the beets column name for an
@@ -1458,12 +1446,9 @@ class Server(BaseServer):
clause, subvals = query.clause()
statement = (
- "SELECT DISTINCT "
- + show_key
- + " FROM items WHERE "
- + clause
- + " ORDER BY "
- + show_key
+ f"SELECT DISTINCT {show_key}"
+ f" FROM items WHERE {clause}"
+ f" ORDER BY {show_key}"
)
self._log.debug(statement)
with self.lib.transaction() as tx:
@@ -1473,7 +1458,7 @@ class Server(BaseServer):
if not row[0]:
# Skip any empty values of the field.
continue
- yield show_tag_canon + ": " + str(row[0])
+ yield f"{show_tag_canon}: {row[0]}"
def cmd_count(self, conn, tag, value):
"""Returns the number and total time of songs matching the
@@ -1487,8 +1472,8 @@ class Server(BaseServer):
):
songs += 1
playtime += item.length
- yield "songs: " + str(songs)
- yield "playtime: " + str(int(playtime))
+ yield f"songs: {songs}"
+ yield f"playtime: {int(playtime)}"
# Persistent playlist manipulation. In MPD this is an optional feature so
# these dummy implementations match MPD's behaviour with the feature off.
diff --git a/beetsplug/bpd/gstplayer.py b/beetsplug/bpd/gstplayer.py
index 03fb179aa..fa23f2b0e 100644
--- a/beetsplug/bpd/gstplayer.py
+++ b/beetsplug/bpd/gstplayer.py
@@ -129,7 +129,7 @@ class GstPlayer:
self.player.set_state(Gst.State.NULL)
if isinstance(path, str):
path = path.encode("utf-8")
- uri = "file://" + urllib.parse.quote(path)
+ uri = f"file://{urllib.parse.quote(path)}"
self.player.set_property("uri", uri)
self.player.set_state(Gst.State.PLAYING)
self.playing = True
diff --git a/beetsplug/bpm.py b/beetsplug/bpm.py
index 145986a95..d49963b72 100644
--- a/beetsplug/bpm.py
+++ b/beetsplug/bpm.py
@@ -73,12 +73,12 @@ class BPMPlugin(BeetsPlugin):
item = items[0]
if item["bpm"]:
- self._log.info("Found bpm {0}", item["bpm"])
+ self._log.info("Found bpm {}", item["bpm"])
if not overwrite:
return
self._log.info(
- "Press Enter {0} times to the rhythm or Ctrl-D to exit",
+ "Press Enter {} times to the rhythm or Ctrl-D to exit",
self.config["max_strokes"].get(int),
)
new_bpm = bpm(self.config["max_strokes"].get(int))
@@ -86,4 +86,4 @@ class BPMPlugin(BeetsPlugin):
if write:
item.try_write()
item.store()
- self._log.info("Added new bpm {0}", item["bpm"])
+ self._log.info("Added new bpm {}", item["bpm"])
diff --git a/beetsplug/bpsync.py b/beetsplug/bpsync.py
index ccd781b28..9ae6d47d5 100644
--- a/beetsplug/bpsync.py
+++ b/beetsplug/bpsync.py
@@ -82,8 +82,8 @@ class BPSyncPlugin(BeetsPlugin):
if not self.is_beatport_track(item):
self._log.info(
- "Skipping non-{} singleton: {}",
- self.beatport_plugin.data_source,
+ "Skipping non-{.beatport_plugin.data_source} singleton: {}",
+ self,
item,
)
continue
@@ -107,8 +107,8 @@ class BPSyncPlugin(BeetsPlugin):
return False
if not album.mb_albumid.isnumeric():
self._log.info(
- "Skipping album with invalid {} ID: {}",
- self.beatport_plugin.data_source,
+ "Skipping album with invalid {.beatport_plugin.data_source} ID: {}",
+ self,
album,
)
return False
@@ -117,8 +117,8 @@ class BPSyncPlugin(BeetsPlugin):
return items
if not all(self.is_beatport_track(item) for item in items):
self._log.info(
- "Skipping non-{} release: {}",
- self.beatport_plugin.data_source,
+ "Skipping non-{.beatport_plugin.data_source} release: {}",
+ self,
album,
)
return False
@@ -139,9 +139,7 @@ class BPSyncPlugin(BeetsPlugin):
albuminfo = self.beatport_plugin.album_for_id(album.mb_albumid)
if not albuminfo:
self._log.info(
- "Release ID {} not found for album {}",
- album.mb_albumid,
- album,
+ "Release ID {0.mb_albumid} not found for album {0}", album
)
continue
diff --git a/beetsplug/bucket.py b/beetsplug/bucket.py
index 9246539fc..40369f74a 100644
--- a/beetsplug/bucket.py
+++ b/beetsplug/bucket.py
@@ -41,7 +41,7 @@ def span_from_str(span_str):
def normalize_year(d, yearfrom):
"""Convert string to a 4 digits year"""
if yearfrom < 100:
- raise BucketError("%d must be expressed on 4 digits" % yearfrom)
+ raise BucketError(f"{yearfrom} must be expressed on 4 digits")
# if two digits only, pick closest year that ends by these two
# digits starting from yearfrom
@@ -55,14 +55,13 @@ def span_from_str(span_str):
years = [int(x) for x in re.findall(r"\d+", span_str)]
if not years:
raise ui.UserError(
- "invalid range defined for year bucket '%s': no "
- "year found" % span_str
+ f"invalid range defined for year bucket {span_str!r}: no year found"
)
try:
years = [normalize_year(x, years[0]) for x in years]
except BucketError as exc:
raise ui.UserError(
- "invalid range defined for year bucket '%s': %s" % (span_str, exc)
+ f"invalid range defined for year bucket {span_str!r}: {exc}"
)
res = {"from": years[0], "str": span_str}
@@ -125,22 +124,19 @@ def str2fmt(s):
"fromnchars": len(m.group("fromyear")),
"tonchars": len(m.group("toyear")),
}
- res["fmt"] = "{}%s{}{}{}".format(
- m.group("bef"),
- m.group("sep"),
- "%s" if res["tonchars"] else "",
- m.group("after"),
+ res["fmt"] = (
+ f"{m['bef']}{{}}{m['sep']}{'{}' if res['tonchars'] else ''}{m['after']}"
)
return res
def format_span(fmt, yearfrom, yearto, fromnchars, tonchars):
"""Return a span string representation."""
- args = str(yearfrom)[-fromnchars:]
+ args = [str(yearfrom)[-fromnchars:]]
if tonchars:
- args = (str(yearfrom)[-fromnchars:], str(yearto)[-tonchars:])
+ args.append(str(yearto)[-tonchars:])
- return fmt % args
+ return fmt.format(*args)
def extract_modes(spans):
@@ -169,14 +165,12 @@ def build_alpha_spans(alpha_spans_str, alpha_regexs):
else:
raise ui.UserError(
"invalid range defined for alpha bucket "
- "'%s': no alphanumeric character found" % elem
+ f"'{elem}': no alphanumeric character found"
)
spans.append(
re.compile(
- "^["
- + ASCII_DIGITS[begin_index : end_index + 1]
- + ASCII_DIGITS[begin_index : end_index + 1].upper()
- + "]"
+ rf"^[{ASCII_DIGITS[begin_index : end_index + 1]}]",
+ re.IGNORECASE,
)
)
return spans
diff --git a/beetsplug/chroma.py b/beetsplug/chroma.py
index f90877113..192310fb8 100644
--- a/beetsplug/chroma.py
+++ b/beetsplug/chroma.py
@@ -90,7 +90,7 @@ def acoustid_match(log, path):
duration, fp = acoustid.fingerprint_file(util.syspath(path))
except acoustid.FingerprintGenerationError as exc:
log.error(
- "fingerprinting of {0} failed: {1}",
+ "fingerprinting of {} failed: {}",
util.displayable_path(repr(path)),
exc,
)
@@ -98,15 +98,17 @@ def acoustid_match(log, path):
fp = fp.decode()
_fingerprints[path] = fp
try:
- res = acoustid.lookup(API_KEY, fp, duration, meta="recordings releases")
+ res = acoustid.lookup(
+ API_KEY, fp, duration, meta="recordings releases", timeout=10
+ )
except acoustid.AcoustidError as exc:
log.debug(
- "fingerprint matching {0} failed: {1}",
+ "fingerprint matching {} failed: {}",
util.displayable_path(repr(path)),
exc,
)
return None
- log.debug("chroma: fingerprinted {0}", util.displayable_path(repr(path)))
+ log.debug("chroma: fingerprinted {}", util.displayable_path(repr(path)))
# Ensure the response is usable and parse it.
if res["status"] != "ok" or not res.get("results"):
@@ -144,7 +146,7 @@ def acoustid_match(log, path):
release_ids = [rel["id"] for rel in releases]
log.debug(
- "matched recordings {0} on releases {1}", recording_ids, release_ids
+ "matched recordings {} on releases {}", recording_ids, release_ids
)
_matches[path] = recording_ids, release_ids
@@ -209,7 +211,7 @@ class AcoustidPlugin(MetadataSourcePlugin):
if album:
albums.append(album)
- self._log.debug("acoustid album candidates: {0}", len(albums))
+ self._log.debug("acoustid album candidates: {}", len(albums))
return albums
def item_candidates(self, item, artist, title) -> Iterable[TrackInfo]:
@@ -222,7 +224,7 @@ class AcoustidPlugin(MetadataSourcePlugin):
track = self.mb.track_for_id(recording_id)
if track:
tracks.append(track)
- self._log.debug("acoustid item candidates: {0}", len(tracks))
+ self._log.debug("acoustid item candidates: {}", len(tracks))
return tracks
def album_for_id(self, *args, **kwargs):
@@ -290,11 +292,11 @@ def submit_items(log, userkey, items, chunksize=64):
def submit_chunk():
"""Submit the current accumulated fingerprint data."""
- log.info("submitting {0} fingerprints", len(data))
+ log.info("submitting {} fingerprints", len(data))
try:
- acoustid.submit(API_KEY, userkey, data)
+ acoustid.submit(API_KEY, userkey, data, timeout=10)
except acoustid.AcoustidError as exc:
- log.warning("acoustid submission error: {0}", exc)
+ log.warning("acoustid submission error: {}", exc)
del data[:]
for item in items:
@@ -341,31 +343,23 @@ def fingerprint_item(log, item, write=False):
"""
# Get a fingerprint and length for this track.
if not item.length:
- log.info("{0}: no duration available", util.displayable_path(item.path))
+ log.info("{.filepath}: no duration available", item)
elif item.acoustid_fingerprint:
if write:
- log.info(
- "{0}: fingerprint exists, skipping",
- util.displayable_path(item.path),
- )
+ log.info("{.filepath}: fingerprint exists, skipping", item)
else:
- log.info(
- "{0}: using existing fingerprint",
- util.displayable_path(item.path),
- )
+ log.info("{.filepath}: using existing fingerprint", item)
return item.acoustid_fingerprint
else:
- log.info("{0}: fingerprinting", util.displayable_path(item.path))
+ log.info("{.filepath}: fingerprinting", item)
try:
_, fp = acoustid.fingerprint_file(util.syspath(item.path))
item.acoustid_fingerprint = fp.decode()
if write:
- log.info(
- "{0}: writing fingerprint", util.displayable_path(item.path)
- )
+ log.info("{.filepath}: writing fingerprint", item)
item.try_write()
if item._db:
item.store()
return item.acoustid_fingerprint
except acoustid.FingerprintGenerationError as exc:
- log.info("fingerprint generation failed: {0}", exc)
+ log.info("fingerprint generation failed: {}", exc)
diff --git a/beetsplug/convert.py b/beetsplug/convert.py
index c4df9ab57..e72f8c75a 100644
--- a/beetsplug/convert.py
+++ b/beetsplug/convert.py
@@ -25,12 +25,13 @@ from string import Template
import mediafile
from confuse import ConfigTypeError, Optional
-from beets import art, config, plugins, ui, util
+from beets import config, plugins, ui, util
from beets.library import Item, parse_query_string
from beets.plugins import BeetsPlugin
from beets.util import par_map
from beets.util.artresizer import ArtResizer
from beets.util.m3u import M3UFile
+from beetsplug._utils import art
_fs_lock = threading.Lock()
_temp_files = [] # Keep track of temporary transcoded files for deletion.
@@ -64,9 +65,7 @@ def get_format(fmt=None):
command = format_info["command"]
extension = format_info.get("extension", fmt)
except KeyError:
- raise ui.UserError(
- 'convert: format {} needs the "command" field'.format(fmt)
- )
+ raise ui.UserError(f'convert: format {fmt} needs the "command" field')
except ConfigTypeError:
command = config["convert"]["formats"][fmt].get(str)
extension = fmt
@@ -77,8 +76,8 @@ def get_format(fmt=None):
command = config["convert"]["command"].as_str()
elif "opts" in keys:
# Undocumented option for backwards compatibility with < 1.3.1.
- command = "ffmpeg -i $source -y {} $dest".format(
- config["convert"]["opts"].as_str()
+ command = (
+ f"ffmpeg -i $source -y {config['convert']['opts'].as_str()} $dest"
)
if "extension" in keys:
extension = config["convert"]["extension"].as_str()
@@ -123,20 +122,28 @@ class ConvertPlugin(BeetsPlugin):
"threads": os.cpu_count(),
"format": "mp3",
"id3v23": "inherit",
+ "write_metadata": True,
"formats": {
"aac": {
- "command": "ffmpeg -i $source -y -vn -acodec aac "
- "-aq 1 $dest",
+ "command": (
+ "ffmpeg -i $source -y -vn -acodec aac -aq 1 $dest"
+ ),
"extension": "m4a",
},
"alac": {
- "command": "ffmpeg -i $source -y -vn -acodec alac $dest",
+ "command": (
+ "ffmpeg -i $source -y -vn -acodec alac $dest"
+ ),
"extension": "m4a",
},
"flac": "ffmpeg -i $source -y -vn -acodec flac $dest",
"mp3": "ffmpeg -i $source -y -vn -aq 2 $dest",
- "opus": "ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest",
- "ogg": "ffmpeg -i $source -y -vn -acodec libvorbis -aq 3 $dest",
+ "opus": (
+ "ffmpeg -i $source -y -vn -acodec libopus -ab 96k $dest"
+ ),
+ "ogg": (
+ "ffmpeg -i $source -y -vn -acodec libvorbis -aq 3 $dest"
+ ),
"wma": "ffmpeg -i $source -y -vn -acodec wmav2 -vn $dest",
},
"max_bitrate": None,
@@ -171,16 +178,17 @@ class ConvertPlugin(BeetsPlugin):
"--threads",
action="store",
type="int",
- help="change the number of threads, \
- defaults to maximum available processors",
+ help=(
+ "change the number of threads, defaults to maximum available"
+ " processors"
+ ),
)
cmd.parser.add_option(
"-k",
"--keep-new",
action="store_true",
dest="keep_new",
- help="keep only the converted \
- and move the old files",
+ help="keep only the converted and move the old files",
)
cmd.parser.add_option(
"-d", "--dest", action="store", help="set the destination directory"
@@ -204,16 +212,16 @@ class ConvertPlugin(BeetsPlugin):
"--link",
action="store_true",
dest="link",
- help="symlink files that do not \
- need transcoding.",
+ help="symlink files that do not need transcoding.",
)
cmd.parser.add_option(
"-H",
"--hardlink",
action="store_true",
dest="hardlink",
- help="hardlink files that do not \
- need transcoding. Overrides --link.",
+ help=(
+ "hardlink files that do not need transcoding. Overrides --link."
+ ),
)
cmd.parser.add_option(
"-m",
@@ -282,7 +290,7 @@ class ConvertPlugin(BeetsPlugin):
quiet = self.config["quiet"].get(bool)
if not quiet and not pretend:
- self._log.info("Encoding {0}", util.displayable_path(source))
+ self._log.info("Encoding {}", util.displayable_path(source))
command = os.fsdecode(command)
source = os.fsdecode(source)
@@ -301,7 +309,7 @@ class ConvertPlugin(BeetsPlugin):
encode_cmd.append(os.fsdecode(args[i]))
if pretend:
- self._log.info("{0}", " ".join(args))
+ self._log.info("{}", " ".join(args))
return
try:
@@ -309,26 +317,25 @@ class ConvertPlugin(BeetsPlugin):
except subprocess.CalledProcessError as exc:
# Something went wrong (probably Ctrl+C), remove temporary files
self._log.info(
- "Encoding {0} failed. Cleaning up...",
+ "Encoding {} failed. Cleaning up...",
util.displayable_path(source),
)
self._log.debug(
- "Command {0} exited with status {1}: {2}",
+ "Command {0} exited with status {1.returncode}: {1.output}",
args,
- exc.returncode,
- exc.output,
+ exc,
)
util.remove(dest)
util.prune_dirs(os.path.dirname(dest))
raise
except OSError as exc:
raise ui.UserError(
- "convert: couldn't invoke '{}': {}".format(" ".join(args), exc)
+ f"convert: couldn't invoke {' '.join(args)!r}: {exc}"
)
if not quiet and not pretend:
self._log.info(
- "Finished encoding {0}", util.displayable_path(source)
+ "Finished encoding {}", util.displayable_path(source)
)
def convert_item(
@@ -356,7 +363,7 @@ class ConvertPlugin(BeetsPlugin):
try:
mediafile.MediaFile(util.syspath(item.path))
except mediafile.UnreadableFileError as exc:
- self._log.error("Could not open file to convert: {0}", exc)
+ self._log.error("Could not open file to convert: {}", exc)
continue
# When keeping the new file in the library, we first move the
@@ -382,21 +389,20 @@ class ConvertPlugin(BeetsPlugin):
if os.path.exists(util.syspath(dest)):
self._log.info(
- "Skipping {0} (target file exists)",
- util.displayable_path(item.path),
+ "Skipping {.filepath} (target file exists)", item
)
continue
if keep_new:
if pretend:
self._log.info(
- "mv {0} {1}",
- util.displayable_path(item.path),
+ "mv {.filepath} {}",
+ item,
util.displayable_path(original),
)
else:
self._log.info(
- "Moving to {0}", util.displayable_path(original)
+ "Moving to {}", util.displayable_path(original)
)
util.move(item.path, original)
@@ -412,10 +418,10 @@ class ConvertPlugin(BeetsPlugin):
msg = "ln" if hardlink else ("ln -s" if link else "cp")
self._log.info(
- "{2} {0} {1}",
+ "{} {} {}",
+ msg,
util.displayable_path(original),
util.displayable_path(converted),
- msg,
)
else:
# No transcoding necessary.
@@ -425,9 +431,7 @@ class ConvertPlugin(BeetsPlugin):
else ("Linking" if link else "Copying")
)
- self._log.info(
- "{1} {0}", util.displayable_path(item.path), msg
- )
+ self._log.info("{} {.filepath}", msg, item)
if hardlink:
util.hardlink(original, converted)
@@ -443,8 +447,9 @@ class ConvertPlugin(BeetsPlugin):
if id3v23 == "inherit":
id3v23 = None
- # Write tags from the database to the converted file.
- item.try_write(path=converted, id3v23=id3v23)
+ # Write tags from the database to the file if requested
+ if self.config["write_metadata"].get(bool):
+ item.try_write(path=converted, id3v23=id3v23)
if keep_new:
# If we're keeping the transcoded file, read it again (after
@@ -458,8 +463,7 @@ class ConvertPlugin(BeetsPlugin):
if album and album.artpath:
maxwidth = self._get_art_resize(album.artpath)
self._log.debug(
- "embedding album art from {}",
- util.displayable_path(album.artpath),
+ "embedding album art from {.art_filepath}", album
)
art.embed_item(
self._log,
@@ -517,8 +521,7 @@ class ConvertPlugin(BeetsPlugin):
if os.path.exists(util.syspath(dest)):
self._log.info(
- "Skipping {0} (target file exists)",
- util.displayable_path(album.artpath),
+ "Skipping {.art_filepath} (target file exists)", album
)
return
@@ -528,8 +531,8 @@ class ConvertPlugin(BeetsPlugin):
# Either copy or resize (while copying) the image.
if maxwidth is not None:
self._log.info(
- "Resizing cover art from {0} to {1}",
- util.displayable_path(album.artpath),
+ "Resizing cover art from {.art_filepath} to {}",
+ album,
util.displayable_path(dest),
)
if not pretend:
@@ -539,10 +542,10 @@ class ConvertPlugin(BeetsPlugin):
msg = "ln" if hardlink else ("ln -s" if link else "cp")
self._log.info(
- "{2} {0} {1}",
- util.displayable_path(album.artpath),
- util.displayable_path(dest),
+ "{} {.art_filepath} {}",
msg,
+ album,
+ util.displayable_path(dest),
)
else:
msg = (
@@ -552,10 +555,10 @@ class ConvertPlugin(BeetsPlugin):
)
self._log.info(
- "{2} cover art from {0} to {1}",
- util.displayable_path(album.artpath),
- util.displayable_path(dest),
+ "{} cover art from {.art_filepath} to {}",
msg,
+ album,
+ util.displayable_path(dest),
)
if hardlink:
util.hardlink(album.artpath, dest)
@@ -616,7 +619,7 @@ class ConvertPlugin(BeetsPlugin):
# Playlist paths are understood as relative to the dest directory.
pl_normpath = util.normpath(playlist)
pl_dir = os.path.dirname(pl_normpath)
- self._log.info("Creating playlist file {0}", pl_normpath)
+ self._log.info("Creating playlist file {}", pl_normpath)
# Generates a list of paths to media files, ensures the paths are
# relative to the playlist's location and translates the unicode
# strings we get from item.destination to bytes.
@@ -644,7 +647,7 @@ class ConvertPlugin(BeetsPlugin):
tmpdir = self.config["tmpdir"].get()
if tmpdir:
tmpdir = os.fsdecode(util.bytestring_path(tmpdir))
- fd, dest = tempfile.mkstemp(os.fsdecode(b"." + ext), dir=tmpdir)
+ fd, dest = tempfile.mkstemp(f".{os.fsdecode(ext)}", dir=tmpdir)
os.close(fd)
dest = util.bytestring_path(dest)
_temp_files.append(dest) # Delete the transcode later.
@@ -666,7 +669,7 @@ class ConvertPlugin(BeetsPlugin):
if self.config["delete_originals"]:
self._log.log(
logging.DEBUG if self.config["quiet"] else logging.INFO,
- "Removing original file {0}",
+ "Removing original file {}",
source_path,
)
util.remove(source_path, False)
diff --git a/beetsplug/deezer.py b/beetsplug/deezer.py
index 8815e3d59..3eaca1e05 100644
--- a/beetsplug/deezer.py
+++ b/beetsplug/deezer.py
@@ -21,7 +21,6 @@ import time
from typing import TYPE_CHECKING, Literal, Sequence
import requests
-import unidecode
from beets import ui
from beets.autotag import AlbumInfo, TrackInfo
@@ -50,6 +49,9 @@ class DeezerPlugin(SearchApiMetadataSourcePlugin[IDResponse]):
album_url = "https://api.deezer.com/album/"
track_url = "https://api.deezer.com/track/"
+ def __init__(self) -> None:
+ super().__init__()
+
def commands(self):
"""Add beet UI commands to interact with Deezer."""
deezer_update_cmd = ui.Subcommand(
@@ -97,7 +99,7 @@ class DeezerPlugin(SearchApiMetadataSourcePlugin[IDResponse]):
f"Invalid `release_date` returned by {self.data_source} API: "
f"{release_date!r}"
)
- tracks_obj = self.fetch_data(self.album_url + deezer_id + "/tracks")
+ tracks_obj = self.fetch_data(f"{self.album_url}{deezer_id}/tracks")
if tracks_obj is None:
return None
try:
@@ -170,7 +172,7 @@ class DeezerPlugin(SearchApiMetadataSourcePlugin[IDResponse]):
# the track's disc).
if not (
album_tracks_obj := self.fetch_data(
- self.album_url + str(track_data["album"]["id"]) + "/tracks"
+ f"{self.album_url}{track_data['album']['id']}/tracks"
)
):
return None
@@ -216,27 +218,6 @@ class DeezerPlugin(SearchApiMetadataSourcePlugin[IDResponse]):
deezer_updated=time.time(),
)
- @staticmethod
- def _construct_search_query(
- filters: SearchFilter, keywords: str = ""
- ) -> str:
- """Construct a query string with the specified filters and keywords to
- be provided to the Deezer Search API
- (https://developers.deezer.com/api/search).
-
- :param filters: Field filters to apply.
- :param keywords: (Optional) Query keywords to use.
- :return: Query string to be provided to the Search API.
- """
- query_components = [
- keywords,
- " ".join(f'{k}:"{v}"' for k, v in filters.items()),
- ]
- query = " ".join([q for q in query_components if q])
- if not isinstance(query, str):
- query = query.decode("utf8")
- return unidecode.unidecode(query)
-
def _search_api(
self,
query_type: Literal[
@@ -250,37 +231,42 @@ class DeezerPlugin(SearchApiMetadataSourcePlugin[IDResponse]):
"user",
],
filters: SearchFilter,
- keywords="",
+ query_string: str = "",
) -> Sequence[IDResponse]:
- """Query the Deezer Search API for the specified ``keywords``, applying
+ """Query the Deezer Search API for the specified ``query_string``, applying
the provided ``filters``.
:param filters: Field filters to apply.
- :param keywords: Query keywords to use.
+ :param query_string: Additional query to include in the search.
:return: JSON data for the class:`Response ` object or None
if no search results are returned.
"""
- query = self._construct_search_query(keywords=keywords, filters=filters)
- self._log.debug(f"Searching {self.data_source} for '{query}'")
+ query = self._construct_search_query(
+ query_string=query_string, filters=filters
+ )
+ self._log.debug("Searching {.data_source} for '{}'", self, query)
try:
response = requests.get(
- self.search_url + query_type,
- params={"q": query},
+ f"{self.search_url}{query_type}",
+ params={
+ "q": query,
+ "limit": self.config["search_limit"].get(),
+ },
timeout=10,
)
response.raise_for_status()
except requests.exceptions.RequestException as e:
self._log.error(
- "Error fetching data from {} API\n Error: {}",
- self.data_source,
+ "Error fetching data from {.data_source} API\n Error: {}",
+ self,
e,
)
return ()
response_data: Sequence[IDResponse] = response.json().get("data", [])
self._log.debug(
- "Found {} result(s) from {} for '{}'",
+ "Found {} result(s) from {.data_source} for '{}'",
len(response_data),
- self.data_source,
+ self,
query,
)
return response_data
diff --git a/beetsplug/discogs.py b/beetsplug/discogs.py
index ac7421c5f..be1cf97fa 100644
--- a/beetsplug/discogs.py
+++ b/beetsplug/discogs.py
@@ -27,13 +27,13 @@ import time
import traceback
from functools import cache
from string import ascii_lowercase
-from typing import TYPE_CHECKING, Sequence
+from typing import TYPE_CHECKING, Sequence, cast
import confuse
from discogs_client import Client, Master, Release
from discogs_client.exceptions import DiscogsAPIError
from requests.exceptions import ConnectionError
-from typing_extensions import TypedDict
+from typing_extensions import NotRequired, TypedDict
import beets
import beets.ui
@@ -76,6 +76,8 @@ TRACK_INDEX_RE = re.compile(
re.VERBOSE,
)
+DISAMBIGUATION_RE = re.compile(r" \(\d+\)")
+
class ReleaseFormat(TypedDict):
name: str
@@ -83,6 +85,42 @@ class ReleaseFormat(TypedDict):
descriptions: list[str] | None
+class Artist(TypedDict):
+ name: str
+ anv: str
+ join: str
+ role: str
+ tracks: str
+ id: str
+ resource_url: str
+
+
+class Track(TypedDict):
+ position: str
+ type_: str
+ title: str
+ duration: str
+ artists: list[Artist]
+ extraartists: NotRequired[list[Artist]]
+
+
+class TrackWithSubtracks(Track):
+ sub_tracks: list[TrackWithSubtracks]
+
+
+class IntermediateTrackInfo(TrackInfo):
+    """A TrackInfo variant that carries the raw string medium
+    produced by get_track_info, so it can be post-processed later."""
+
+ def __init__(
+ self,
+ medium_str: str | None,
+ **kwargs,
+ ) -> None:
+ self.medium_str = medium_str
+ super().__init__(**kwargs)
+
+
class DiscogsPlugin(MetadataSourcePlugin):
def __init__(self):
super().__init__()
@@ -91,12 +129,17 @@ class DiscogsPlugin(MetadataSourcePlugin):
"apikey": API_KEY,
"apisecret": API_SECRET,
"tokenfile": "discogs_token.json",
- "source_weight": 0.5,
"user_token": "",
"separator": ", ",
"index_tracks": False,
"append_style_genre": False,
- "search_limit": 5,
+ "strip_disambiguation": True,
+ "featured_string": "Feat.",
+ "anv": {
+ "artist_credit": True,
+ "artist": False,
+ "album_artist": False,
+ },
}
)
self.config["apikey"].redact = True
@@ -104,7 +147,7 @@ class DiscogsPlugin(MetadataSourcePlugin):
self.config["user_token"].redact = True
self.setup()
- def setup(self, session=None):
+ def setup(self, session=None) -> None:
"""Create the `discogs_client` field. Authenticate if necessary."""
c_key = self.config["apikey"].as_str()
c_secret = self.config["apisecret"].as_str()
@@ -130,22 +173,22 @@ class DiscogsPlugin(MetadataSourcePlugin):
self.discogs_client = Client(USER_AGENT, c_key, c_secret, token, secret)
- def reset_auth(self):
+ def reset_auth(self) -> None:
"""Delete token file & redo the auth steps."""
os.remove(self._tokenfile())
self.setup()
- def _tokenfile(self):
+ def _tokenfile(self) -> str:
"""Get the path to the JSON file for storing the OAuth token."""
return self.config["tokenfile"].get(confuse.Filename(in_app_dir=True))
- def authenticate(self, c_key, c_secret):
+ def authenticate(self, c_key: str, c_secret: str) -> tuple[str, str]:
# Get the link for the OAuth page.
auth_client = Client(USER_AGENT, c_key, c_secret)
try:
_, _, url = auth_client.get_authorize_url()
except CONNECTION_ERRORS as e:
- self._log.debug("connection error: {0}", e)
+ self._log.debug("connection error: {}", e)
raise beets.ui.UserError("communication with Discogs failed")
beets.ui.print_("To authenticate with Discogs, visit:")
@@ -158,11 +201,11 @@ class DiscogsPlugin(MetadataSourcePlugin):
except DiscogsAPIError:
raise beets.ui.UserError("Discogs authorization failed")
except CONNECTION_ERRORS as e:
- self._log.debug("connection error: {0}", e)
+ self._log.debug("connection error: {}", e)
raise beets.ui.UserError("Discogs token request failed")
# Save the token for later use.
- self._log.debug("Discogs token {0}, secret {1}", token, secret)
+ self._log.debug("Discogs token {}, secret {}", token, secret)
with open(self._tokenfile(), "w") as f:
json.dump({"token": token, "secret": secret}, f)
@@ -202,7 +245,7 @@ class DiscogsPlugin(MetadataSourcePlugin):
"""Fetches an album by its Discogs ID and returns an AlbumInfo object
or None if the album is not found.
"""
- self._log.debug("Searching for release {0}", album_id)
+ self._log.debug("Searching for release {}", album_id)
discogs_id = self._extract_id(album_id)
@@ -216,7 +259,7 @@ class DiscogsPlugin(MetadataSourcePlugin):
except DiscogsAPIError as e:
if e.status_code != 404:
self._log.debug(
- "API Error: {0} (query: {1})",
+ "API Error: {} (query: {})",
e,
result.data["resource_url"],
)
@@ -250,7 +293,7 @@ class DiscogsPlugin(MetadataSourcePlugin):
try:
results = self.discogs_client.search(query, type="release")
- results.per_page = self.config["search_limit"].as_number()
+ results.per_page = self.config["search_limit"].get()
releases = results.page(1)
except CONNECTION_ERRORS:
self._log.debug(
@@ -266,7 +309,7 @@ class DiscogsPlugin(MetadataSourcePlugin):
"""Fetches a master release given its Discogs ID and returns its year
or None if the master release is not found.
"""
- self._log.debug("Getting master release {0}", master_id)
+ self._log.debug("Getting master release {}", master_id)
result = Master(self.discogs_client, {"id": master_id})
try:
@@ -274,7 +317,7 @@ class DiscogsPlugin(MetadataSourcePlugin):
except DiscogsAPIError as e:
if e.status_code != 404:
self._log.debug(
- "API Error: {0} (query: {1})",
+ "API Error: {} (query: {})",
e,
result.data["resource_url"],
)
@@ -300,7 +343,26 @@ class DiscogsPlugin(MetadataSourcePlugin):
return media, albumtype
- def get_album_info(self, result):
+ def get_artist_with_anv(
+ self, artists: list[Artist], use_anv: bool = False
+ ) -> tuple[str, str | None]:
+    """Iterate through a Discogs artist list and, when the artist's
+    ANV (artist name variation) is to be used, substitute it for the
+    artist name. Delegates joining to the parent class get_artist method."""
+ artist_list: list[dict[str | int, str]] = []
+ for artist_data in artists:
+ a: dict[str | int, str] = {
+ "name": artist_data["name"],
+ "id": artist_data["id"],
+ "join": artist_data.get("join", ""),
+ }
+ if use_anv and (anv := artist_data.get("anv", "")):
+ a["name"] = anv
+ artist_list.append(a)
+ artist, artist_id = self.get_artist(artist_list, join_key="join")
+ return self.strip_disambiguation(artist), artist_id
+
+ def get_album_info(self, result: Release) -> AlbumInfo | None:
"""Returns an AlbumInfo object for a discogs Release object."""
# Explicitly reload the `Release` fields, as they might not be yet
# present if the result is from a `discogs_client.search()`.
@@ -328,16 +390,29 @@ class DiscogsPlugin(MetadataSourcePlugin):
self._log.warning("Release does not contain the required fields")
return None
- artist, artist_id = self.get_artist(
- [a.data for a in result.artists], join_key="join"
+ artist_data = [a.data for a in result.artists]
+ album_artist, album_artist_id = self.get_artist_with_anv(artist_data)
+ album_artist_anv, _ = self.get_artist_with_anv(
+ artist_data, use_anv=True
)
+ artist_credit = album_artist_anv
+
album = re.sub(r" +", " ", result.title)
album_id = result.data["id"]
# Use `.data` to access the tracklist directly instead of the
# convenient `.tracklist` property, which will strip out useful artist
# information and leave us with skeleton `Artist` objects that will
# each make an API call just to get the same data back.
- tracks = self.get_tracks(result.data["tracklist"])
+ tracks = self.get_tracks(
+ result.data["tracklist"],
+ (album_artist, album_artist_anv, album_artist_id),
+ )
+
+ # Assign ANV to the proper fields for tagging
+ if not self.config["anv"]["artist_credit"]:
+ artist_credit = album_artist
+ if self.config["anv"]["album_artist"]:
+ album_artist = album_artist_anv
# Extract information for the optional AlbumInfo fields, if possible.
va = result.data["artists"][0].get("name", "").lower() == "various"
@@ -363,15 +438,20 @@ class DiscogsPlugin(MetadataSourcePlugin):
label = catalogno = labelid = None
if result.data.get("labels"):
- label = result.data["labels"][0].get("name")
+ label = self.strip_disambiguation(
+ result.data["labels"][0].get("name")
+ )
catalogno = result.data["labels"][0].get("catno")
labelid = result.data["labels"][0].get("id")
cover_art_url = self.select_cover_art(result)
- # Additional cleanups (various artists name, catalog number, media).
+ # Additional cleanups
+ # (various artists name, catalog number, media, disambiguation).
if va:
- artist = config["va_name"].as_str()
+ va_name = config["va_name"].as_str()
+ album_artist = va_name
+ artist_credit = va_name
if catalogno == "none":
catalogno = None
# Explicitly set the `media` for the tracks, since it is expected by
@@ -379,13 +459,9 @@ class DiscogsPlugin(MetadataSourcePlugin):
for track in tracks:
track.media = media
track.medium_total = mediums.count(track.medium)
- if not track.artist: # get_track_info often fails to find artist
- track.artist = artist
- if not track.artist_id:
- track.artist_id = artist_id
# Discogs does not have track IDs. Invent our own IDs as proposed
# in #2336.
- track.track_id = str(album_id) + "-" + track.track_alt
+ track.track_id = f"{album_id}-{track.track_alt}"
track.data_url = data_url
track.data_source = "Discogs"
@@ -398,8 +474,9 @@ class DiscogsPlugin(MetadataSourcePlugin):
return AlbumInfo(
album=album,
album_id=album_id,
- artist=artist,
- artist_id=artist_id,
+ artist=album_artist,
+ artist_credit=artist_credit,
+ artist_id=album_artist_id,
tracks=tracks,
albumtype=albumtype,
va=va,
@@ -417,11 +494,11 @@ class DiscogsPlugin(MetadataSourcePlugin):
data_url=data_url,
discogs_albumid=discogs_albumid,
discogs_labelid=labelid,
- discogs_artistid=artist_id,
+ discogs_artistid=album_artist_id,
cover_art_url=cover_art_url,
)
- def select_cover_art(self, result):
+ def select_cover_art(self, result: Release) -> str | None:
"""Returns the best candidate image, if any, from a Discogs `Release` object."""
if result.data.get("images") and len(result.data.get("images")) > 0:
# The first image in this list appears to be the one displayed first
@@ -431,7 +508,7 @@ class DiscogsPlugin(MetadataSourcePlugin):
return None
- def format(self, classification):
+ def format(self, classification: Iterable[str]) -> str | None:
if classification:
return (
self.config["separator"].as_str().join(sorted(classification))
@@ -439,22 +516,17 @@ class DiscogsPlugin(MetadataSourcePlugin):
else:
return None
- def get_tracks(self, tracklist):
- """Returns a list of TrackInfo objects for a discogs tracklist."""
- try:
- clean_tracklist = self.coalesce_tracks(tracklist)
- except Exception as exc:
- # FIXME: this is an extra precaution for making sure there are no
- # side effects after #2222. It should be removed after further
- # testing.
- self._log.debug("{}", traceback.format_exc())
- self._log.error("uncaught exception in coalesce_tracks: {}", exc)
- clean_tracklist = tracklist
- tracks = []
+ def _process_clean_tracklist(
+ self,
+ clean_tracklist: list[Track],
+ album_artist_data: tuple[str, str, str | None],
+ ) -> tuple[list[TrackInfo], dict[int, str], int, list[str], list[str]]:
+ # Distinct works and intra-work divisions, as defined by index tracks.
+ tracks: list[TrackInfo] = []
index_tracks = {}
index = 0
- # Distinct works and intra-work divisions, as defined by index tracks.
- divisions, next_divisions = [], []
+ divisions: list[str] = []
+ next_divisions: list[str] = []
for track in clean_tracklist:
# Only real tracks have `position`. Otherwise, it's an index track.
if track["position"]:
@@ -464,7 +536,9 @@ class DiscogsPlugin(MetadataSourcePlugin):
# divisions.
divisions += next_divisions
del next_divisions[:]
- track_info = self.get_track_info(track, index, divisions)
+ track_info = self.get_track_info(
+ track, index, divisions, album_artist_data
+ )
track_info.track_alt = track["position"]
tracks.append(track_info)
else:
@@ -476,7 +550,29 @@ class DiscogsPlugin(MetadataSourcePlugin):
except IndexError:
pass
index_tracks[index + 1] = track["title"]
+ return tracks, index_tracks, index, divisions, next_divisions
+ def get_tracks(
+ self,
+ tracklist: list[Track],
+ album_artist_data: tuple[str, str, str | None],
+ ) -> list[TrackInfo]:
+ """Returns a list of TrackInfo objects for a discogs tracklist."""
+ try:
+ clean_tracklist: list[Track] = self.coalesce_tracks(
+ cast(list[TrackWithSubtracks], tracklist)
+ )
+ except Exception as exc:
+ # FIXME: this is an extra precaution for making sure there are no
+ # side effects after #2222. It should be removed after further
+ # testing.
+ self._log.debug("{}", traceback.format_exc())
+ self._log.error("uncaught exception in coalesce_tracks: {}", exc)
+ clean_tracklist = tracklist
+ processed = self._process_clean_tracklist(
+ clean_tracklist, album_artist_data
+ )
+ tracks, index_tracks, index, divisions, next_divisions = processed
# Fix up medium and medium_index for each track. Discogs position is
# unreliable, but tracks are in order.
medium = None
@@ -485,8 +581,8 @@ class DiscogsPlugin(MetadataSourcePlugin):
# If a medium has two sides (ie. vinyl or cassette), each pair of
# consecutive sides should belong to the same medium.
- if all([track.medium is not None for track in tracks]):
- m = sorted({track.medium.lower() for track in tracks})
+ if all([track.medium_str is not None for track in tracks]):
+ m = sorted({track.medium_str.lower() for track in tracks})
# If all track.medium are single consecutive letters, assume it is
# a 2-sided medium.
if "".join(m) in ascii_lowercase:
@@ -500,17 +596,17 @@ class DiscogsPlugin(MetadataSourcePlugin):
# side_count is the number of mediums or medium sides (in the case
# of two-sided mediums) that were seen before.
medium_is_index = (
- track.medium
+ track.medium_str
and not track.medium_index
and (
- len(track.medium) != 1
+ len(track.medium_str) != 1
or
# Not within standard incremental medium values (A, B, C, ...).
- ord(track.medium) - 64 != side_count + 1
+ ord(track.medium_str) - 64 != side_count + 1
)
)
- if not medium_is_index and medium != track.medium:
+ if not medium_is_index and medium != track.medium_str:
side_count += 1
if sides_per_medium == 2:
if side_count % sides_per_medium:
@@ -521,7 +617,7 @@ class DiscogsPlugin(MetadataSourcePlugin):
# Medium changed. Reset index_count.
medium_count += 1
index_count = 0
- medium = track.medium
+ medium = track.medium_str
index_count += 1
medium_count = 1 if medium_count == 0 else medium_count
@@ -537,22 +633,27 @@ class DiscogsPlugin(MetadataSourcePlugin):
disctitle = None
track.disctitle = disctitle
- return tracks
+ return cast(list[TrackInfo], tracks)
- def coalesce_tracks(self, raw_tracklist):
+ def coalesce_tracks(
+ self, raw_tracklist: list[TrackWithSubtracks]
+ ) -> list[Track]:
"""Pre-process a tracklist, merging subtracks into a single track. The
title for the merged track is the one from the previous index track,
if present; otherwise it is a combination of the subtracks titles.
"""
- def add_merged_subtracks(tracklist, subtracks):
+ def add_merged_subtracks(
+ tracklist: list[TrackWithSubtracks],
+ subtracks: list[TrackWithSubtracks],
+ ) -> None:
"""Modify `tracklist` in place, merging a list of `subtracks` into
a single track into `tracklist`."""
# Calculate position based on first subtrack, without subindex.
idx, medium_idx, sub_idx = self.get_track_index(
subtracks[0]["position"]
)
- position = "{}{}".format(idx or "", medium_idx or "")
+ position = f"{idx or ''}{medium_idx or ''}"
if tracklist and not tracklist[-1]["position"]:
# Assume the previous index track contains the track title.
@@ -574,8 +675,8 @@ class DiscogsPlugin(MetadataSourcePlugin):
# option is set
if self.config["index_tracks"]:
for subtrack in subtracks:
- subtrack["title"] = "{}: {}".format(
- index_track["title"], subtrack["title"]
+ subtrack["title"] = (
+ f"{index_track['title']}: {subtrack['title']}"
)
tracklist.extend(subtracks)
else:
@@ -585,8 +686,8 @@ class DiscogsPlugin(MetadataSourcePlugin):
tracklist.append(track)
# Pre-process the tracklist, trying to identify subtracks.
- subtracks = []
- tracklist = []
+ subtracks: list[TrackWithSubtracks] = []
+ tracklist: list[TrackWithSubtracks] = []
prev_subindex = ""
for track in raw_tracklist:
# Regular subtrack (track with subindex).
@@ -621,10 +722,32 @@ class DiscogsPlugin(MetadataSourcePlugin):
if subtracks:
add_merged_subtracks(tracklist, subtracks)
- return tracklist
+ return cast(list[Track], tracklist)
- def get_track_info(self, track, index, divisions):
+ def strip_disambiguation(self, text: str) -> str:
+        """Remove Discogs-specific disambiguation suffixes from a string.
+        Turns 'Label Name (5)' into 'Label Name', or 'Artist (1) & Another Artist (2)'
+        into 'Artist & Another Artist'. Does nothing if strip_disambiguation is False."""
+ if not self.config["strip_disambiguation"]:
+ return text
+ return DISAMBIGUATION_RE.sub("", text)
+
+ def get_track_info(
+ self,
+ track: Track,
+ index: int,
+ divisions: list[str],
+ album_artist_data: tuple[str, str, str | None],
+ ) -> IntermediateTrackInfo:
"""Returns a TrackInfo object for a discogs track."""
+
+ artist, artist_anv, artist_id = album_artist_data
+ artist_credit = artist_anv
+ if not self.config["anv"]["artist_credit"]:
+ artist_credit = artist
+ if self.config["anv"]["artist"]:
+ artist = artist_anv
+
title = track["title"]
if self.config["index_tracks"]:
prefix = ", ".join(divisions)
@@ -632,18 +755,44 @@ class DiscogsPlugin(MetadataSourcePlugin):
title = f"{prefix}: {title}"
track_id = None
medium, medium_index, _ = self.get_track_index(track["position"])
- artist, artist_id = self.get_artist(
- track.get("artists", []), join_key="join"
- )
+
+ # If artists are found on the track, we will use those instead
+ if artists := track.get("artists", []):
+ artist, artist_id = self.get_artist_with_anv(
+ artists, self.config["anv"]["artist"]
+ )
+ artist_credit, _ = self.get_artist_with_anv(
+ artists, self.config["anv"]["artist_credit"]
+ )
length = self.get_track_length(track["duration"])
- return TrackInfo(
+
+ # Add featured artists
+ if extraartists := track.get("extraartists", []):
+ featured_list = [
+ artist
+ for artist in extraartists
+ if "Featuring" in artist["role"]
+ ]
+ featured, _ = self.get_artist_with_anv(
+ featured_list, self.config["anv"]["artist"]
+ )
+ featured_credit, _ = self.get_artist_with_anv(
+ featured_list, self.config["anv"]["artist_credit"]
+ )
+ if featured:
+ artist += f" {self.config['featured_string']} {featured}"
+ artist_credit += (
+ f" {self.config['featured_string']} {featured_credit}"
+ )
+ return IntermediateTrackInfo(
title=title,
track_id=track_id,
+ artist_credit=artist_credit,
artist=artist,
artist_id=artist_id,
length=length,
index=index,
- medium=medium,
+ medium_str=medium,
medium_index=medium_index,
)
@@ -664,7 +813,7 @@ class DiscogsPlugin(MetadataSourcePlugin):
return medium or None, index or None, subindex or None
- def get_track_length(self, duration):
+ def get_track_length(self, duration: str) -> int | None:
"""Returns the track length in seconds for a discogs duration."""
try:
length = time.strptime(duration, "%M:%S")
diff --git a/beetsplug/duplicates.py b/beetsplug/duplicates.py
index ea7abaaff..904e19262 100644
--- a/beetsplug/duplicates.py
+++ b/beetsplug/duplicates.py
@@ -150,7 +150,7 @@ class DuplicatesPlugin(BeetsPlugin):
count = self.config["count"].get(bool)
delete = self.config["delete"].get(bool)
remove = self.config["remove"].get(bool)
- fmt = self.config["format"].get(str)
+ fmt_tmpl = self.config["format"].get(str)
full = self.config["full"].get(bool)
keys = self.config["keys"].as_str_seq()
merge = self.config["merge"].get(bool)
@@ -175,15 +175,14 @@ class DuplicatesPlugin(BeetsPlugin):
return
if path:
- fmt = "$path"
+ fmt_tmpl = "$path"
# Default format string for count mode.
- if count and not fmt:
+ if count and not fmt_tmpl:
if album:
- fmt = "$albumartist - $album"
+ fmt_tmpl = "$albumartist - $album"
else:
- fmt = "$albumartist - $album - $title"
- fmt += ": {0}"
+ fmt_tmpl = "$albumartist - $album - $title"
if checksum:
for i in items:
@@ -207,7 +206,7 @@ class DuplicatesPlugin(BeetsPlugin):
delete=delete,
remove=remove,
tag=tag,
- fmt=fmt.format(obj_count),
+ fmt=f"{fmt_tmpl}: {obj_count}",
)
self._command.func = _dup
@@ -255,28 +254,24 @@ class DuplicatesPlugin(BeetsPlugin):
checksum = getattr(item, key, False)
if not checksum:
self._log.debug(
- "key {0} on item {1} not cached:computing checksum",
+ "key {} on item {.filepath} not cached:computing checksum",
key,
- displayable_path(item.path),
+ item,
)
try:
checksum = command_output(args).stdout
setattr(item, key, checksum)
item.store()
self._log.debug(
- "computed checksum for {0} using {1}", item.title, key
+ "computed checksum for {.title} using {}", item, key
)
except subprocess.CalledProcessError as e:
- self._log.debug(
- "failed to checksum {0}: {1}",
- displayable_path(item.path),
- e,
- )
+ self._log.debug("failed to checksum {.filepath}: {}", item, e)
else:
self._log.debug(
- "key {0} on item {1} cached:not computing checksum",
+ "key {} on item {.filepath} cached:not computing checksum",
key,
- displayable_path(item.path),
+ item,
)
return key, checksum
@@ -294,15 +289,15 @@ class DuplicatesPlugin(BeetsPlugin):
values = [v for v in values if v not in (None, "")]
if strict and len(values) < len(keys):
self._log.debug(
- "some keys {0} on item {1} are null or empty: skipping",
+ "some keys {} on item {.filepath} are null or empty: skipping",
keys,
- displayable_path(obj.path),
+ obj,
)
elif not strict and not len(values):
self._log.debug(
- "all keys {0} on item {1} are null or empty: skipping",
+ "all keys {} on item {.filepath} are null or empty: skipping",
keys,
- displayable_path(obj.path),
+ obj,
)
else:
key = tuple(values)
@@ -360,11 +355,11 @@ class DuplicatesPlugin(BeetsPlugin):
value = getattr(o, f, None)
if value:
self._log.debug(
- "key {0} on item {1} is null "
- "or empty: setting from item {2}",
+ "key {} on item {} is null "
+ "or empty: setting from item {.filepath}",
f,
displayable_path(objs[0].path),
- displayable_path(o.path),
+ o,
)
setattr(objs[0], f, value)
objs[0].store()
@@ -384,11 +379,11 @@ class DuplicatesPlugin(BeetsPlugin):
missing.album_id = objs[0].id
missing.add(i._db)
self._log.debug(
- "item {0} missing from album {1}:"
- " merging from {2} into {3}",
+ "item {} missing from album {}:"
+ " merging from {.filepath} into {}",
missing,
objs[0],
- displayable_path(o.path),
+ o,
displayable_path(missing.destination()),
)
missing.move(operation=MoveOperation.COPY)
diff --git a/beetsplug/edit.py b/beetsplug/edit.py
index 52387c314..f6fadefd0 100644
--- a/beetsplug/edit.py
+++ b/beetsplug/edit.py
@@ -46,9 +46,7 @@ def edit(filename, log):
try:
subprocess.call(cmd)
except OSError as exc:
- raise ui.UserError(
- "could not run editor command {!r}: {}".format(cmd[0], exc)
- )
+ raise ui.UserError(f"could not run editor command {cmd[0]!r}: {exc}")
def dump(arg):
@@ -71,9 +69,7 @@ def load(s):
for d in yaml.safe_load_all(s):
if not isinstance(d, dict):
raise ParseError(
- "each entry must be a dictionary; found {}".format(
- type(d).__name__
- )
+ f"each entry must be a dictionary; found {type(d).__name__}"
)
# Convert all keys to strings. They started out as strings,
diff --git a/beetsplug/embedart.py b/beetsplug/embedart.py
index 8df3c3c05..cbf40f570 100644
--- a/beetsplug/embedart.py
+++ b/beetsplug/embedart.py
@@ -20,11 +20,12 @@ from mimetypes import guess_extension
import requests
-from beets import art, config, ui
+from beets import config, ui
from beets.plugins import BeetsPlugin
from beets.ui import print_
from beets.util import bytestring_path, displayable_path, normpath, syspath
from beets.util.artresizer import ArtResizer
+from beetsplug._utils import art
def _confirm(objs, album):
@@ -35,8 +36,9 @@ def _confirm(objs, album):
to items).
"""
noun = "album" if album else "file"
- prompt = "Modify artwork for {} {}{} (Y/n)?".format(
- len(objs), noun, "s" if len(objs) > 1 else ""
+ prompt = (
+ "Modify artwork for"
+ f" {len(objs)} {noun}{'s' if len(objs) > 1 else ''} (Y/n)?"
)
# Show all the items or albums.
@@ -110,9 +112,7 @@ class EmbedCoverArtPlugin(BeetsPlugin):
imagepath = normpath(opts.file)
if not os.path.isfile(syspath(imagepath)):
raise ui.UserError(
- "image file {} not found".format(
- displayable_path(imagepath)
- )
+ f"image file {displayable_path(imagepath)} not found"
)
items = lib.items(args)
@@ -137,7 +137,7 @@ class EmbedCoverArtPlugin(BeetsPlugin):
response = requests.get(opts.url, timeout=5)
response.raise_for_status()
except requests.exceptions.RequestException as e:
- self._log.error("{}".format(e))
+ self._log.error("{}", e)
return
extension = guess_extension(response.headers["Content-Type"])
if extension is None:
@@ -149,7 +149,7 @@ class EmbedCoverArtPlugin(BeetsPlugin):
with open(tempimg, "wb") as f:
f.write(response.content)
except Exception as e:
- self._log.error("Unable to save image: {}".format(e))
+ self._log.error("Unable to save image: {}", e)
return
items = lib.items(args)
# Confirm with user.
@@ -274,7 +274,7 @@ class EmbedCoverArtPlugin(BeetsPlugin):
"""
if self.config["remove_art_file"] and album.artpath:
if os.path.isfile(syspath(album.artpath)):
- self._log.debug("Removing album art file for {0}", album)
+ self._log.debug("Removing album art file for {}", album)
os.remove(syspath(album.artpath))
album.artpath = None
album.store()
diff --git a/beetsplug/embyupdate.py b/beetsplug/embyupdate.py
index c696f39f3..25f3ed8b3 100644
--- a/beetsplug/embyupdate.py
+++ b/beetsplug/embyupdate.py
@@ -38,9 +38,7 @@ def api_url(host, port, endpoint):
hostname_list.insert(0, "http://")
hostname = "".join(hostname_list)
- joined = urljoin(
- "{hostname}:{port}".format(hostname=hostname, port=port), endpoint
- )
+ joined = urljoin(f"{hostname}:{port}", endpoint)
scheme, netloc, path, query_string, fragment = urlsplit(joined)
query_params = parse_qs(query_string)
@@ -81,12 +79,12 @@ def create_headers(user_id, token=None):
headers = {}
authorization = (
- 'MediaBrowser UserId="{user_id}", '
+ f'MediaBrowser UserId="{user_id}", '
'Client="other", '
'Device="beets", '
'DeviceId="beets", '
'Version="0.0.0"'
- ).format(user_id=user_id)
+ )
headers["x-emby-authorization"] = authorization
@@ -186,7 +184,7 @@ class EmbyUpdate(BeetsPlugin):
# Get user information from the Emby API.
user = get_user(host, port, username)
if not user:
- self._log.warning(f"User {username} could not be found.")
+ self._log.warning("User {} could not be found.", username)
return
userid = user[0]["Id"]
@@ -198,7 +196,7 @@ class EmbyUpdate(BeetsPlugin):
# Get authentication token.
token = get_token(host, port, headers, auth_data)
if not token:
- self._log.warning("Could not get token for user {0}", username)
+ self._log.warning("Could not get token for user {}", username)
return
# Recreate headers with a token.
diff --git a/beetsplug/export.py b/beetsplug/export.py
index 05ca3f24a..e6c2b88c7 100644
--- a/beetsplug/export.py
+++ b/beetsplug/export.py
@@ -150,7 +150,7 @@ class ExportPlugin(BeetsPlugin):
try:
data, item = data_emitter(included_keys or "*")
except (mediafile.UnreadableFileError, OSError) as ex:
- self._log.error("cannot read file: {0}", ex)
+ self._log.error("cannot read file: {}", ex)
continue
for key, value in data.items():
diff --git a/beetsplug/fetchart.py b/beetsplug/fetchart.py
index e1ec5aa09..37e7426f6 100644
--- a/beetsplug/fetchart.py
+++ b/beetsplug/fetchart.py
@@ -36,10 +36,10 @@ from beets.util.config import sanitize_pairs
if TYPE_CHECKING:
from collections.abc import Iterable, Iterator, Sequence
- from logging import Logger
from beets.importer import ImportSession, ImportTask
from beets.library import Album, Library
+ from beets.logging import BeetsLogger as Logger
try:
from bs4 import BeautifulSoup, Tag
@@ -133,7 +133,7 @@ class Candidate:
# get_size returns None if no local imaging backend is available
if not self.size:
self.size = ArtResizer.shared.get_size(self.path)
- self._log.debug("image size: {}", self.size)
+ self._log.debug("image size: {.size}", self)
if not self.size:
self._log.warning(
@@ -151,7 +151,7 @@ class Candidate:
# Check minimum dimension.
if plugin.minwidth and self.size[0] < plugin.minwidth:
self._log.debug(
- "image too small ({} < {})", self.size[0], plugin.minwidth
+ "image too small ({} < {.minwidth})", self.size[0], plugin
)
return ImageAction.BAD
@@ -162,10 +162,10 @@ class Candidate:
if edge_diff > plugin.margin_px:
self._log.debug(
"image is not close enough to being "
- "square, ({} - {} > {})",
+ "square, ({} - {} > {.margin_px})",
long_edge,
short_edge,
- plugin.margin_px,
+ plugin,
)
return ImageAction.BAD
elif plugin.margin_percent:
@@ -190,7 +190,7 @@ class Candidate:
downscale = False
if plugin.maxwidth and self.size[0] > plugin.maxwidth:
self._log.debug(
- "image needs rescaling ({} > {})", self.size[0], plugin.maxwidth
+ "image needs rescaling ({} > {.maxwidth})", self.size[0], plugin
)
downscale = True
@@ -200,9 +200,9 @@ class Candidate:
filesize = os.stat(syspath(self.path)).st_size
if filesize > plugin.max_filesize:
self._log.debug(
- "image needs resizing ({}B > {}B)",
+ "image needs resizing ({}B > {.max_filesize}B)",
filesize,
- plugin.max_filesize,
+ plugin,
)
downsize = True
@@ -213,9 +213,9 @@ class Candidate:
reformat = fmt != plugin.cover_format
if reformat:
self._log.debug(
- "image needs reformatting: {} -> {}",
+ "image needs reformatting: {} -> {.cover_format}",
fmt,
- plugin.cover_format,
+ plugin,
)
skip_check_for = skip_check_for or []
@@ -329,7 +329,7 @@ def _logged_get(log: Logger, *args, **kwargs) -> requests.Response:
prepped.url, {}, None, None, None
)
send_kwargs.update(settings)
- log.debug("{}: {}", message, prepped.url)
+ log.debug("{}: {.url}", message, prepped)
return s.send(prepped, **send_kwargs)
@@ -542,14 +542,14 @@ class CoverArtArchive(RemoteArtSource):
try:
response = self.request(url)
except requests.RequestException:
- self._log.debug("{}: error receiving response", self.NAME)
+ self._log.debug("{.NAME}: error receiving response", self)
return
try:
data = response.json()
except ValueError:
self._log.debug(
- "{}: error loading response: {}", self.NAME, response.text
+ "{.NAME}: error loading response: {.text}", self, response
)
return
@@ -593,7 +593,7 @@ class CoverArtArchive(RemoteArtSource):
class Amazon(RemoteArtSource):
NAME = "Amazon"
ID = "amazon"
- URL = "https://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg"
+ URL = "https://images.amazon.com/images/P/{}.{:02d}.LZZZZZZZ.jpg"
INDICES = (1, 2)
def get(
@@ -606,7 +606,7 @@ class Amazon(RemoteArtSource):
if album.asin:
for index in self.INDICES:
yield self._candidate(
- url=self.URL % (album.asin, index),
+ url=self.URL.format(album.asin, index),
match=MetadataMatch.EXACT,
)
@@ -629,7 +629,7 @@ class AlbumArtOrg(RemoteArtSource):
# Get the page from albumart.org.
try:
resp = self.request(self.URL, params={"asin": album.asin})
- self._log.debug("scraped art URL: {}", resp.url)
+ self._log.debug("scraped art URL: {.url}", resp)
except requests.RequestException:
self._log.debug("error scraping art page")
return
@@ -682,7 +682,7 @@ class GoogleImages(RemoteArtSource):
"""
if not (album.albumartist and album.album):
return
- search_string = (album.albumartist + "," + album.album).encode("utf-8")
+ search_string = f"{album.albumartist},{album.album}".encode("utf-8")
try:
response = self.request(
@@ -702,7 +702,7 @@ class GoogleImages(RemoteArtSource):
try:
data = response.json()
except ValueError:
- self._log.debug("google: error loading response: {}", response.text)
+ self._log.debug("google: error loading response: {.text}", response)
return
if "error" in data:
@@ -723,7 +723,7 @@ class FanartTV(RemoteArtSource):
NAME = "fanart.tv"
ID = "fanarttv"
API_URL = "https://webservice.fanart.tv/v3/"
- API_ALBUMS = API_URL + "music/albums/"
+ API_ALBUMS = f"{API_URL}music/albums/"
PROJECT_KEY = "61a7d0ab4e67162b7a0c7c35915cd48e"
def __init__(self, *args, **kwargs):
@@ -750,7 +750,7 @@ class FanartTV(RemoteArtSource):
try:
response = self.request(
- self.API_ALBUMS + album.mb_releasegroupid,
+ f"{self.API_ALBUMS}{album.mb_releasegroupid}",
headers={
"api-key": self.PROJECT_KEY,
"client-key": self.client_key,
@@ -764,7 +764,7 @@ class FanartTV(RemoteArtSource):
data = response.json()
except ValueError:
self._log.debug(
- "fanart.tv: error loading response: {}", response.text
+ "fanart.tv: error loading response: {.text}", response
)
return
@@ -820,7 +820,7 @@ class ITunesStore(RemoteArtSource):
return
payload = {
- "term": album.albumartist + " " + album.album,
+ "term": f"{album.albumartist} {album.album}",
"entity": "album",
"media": "music",
"limit": 200,
@@ -947,14 +947,14 @@ class Wikipedia(RemoteArtSource):
data = dbpedia_response.json()
results = data["results"]["bindings"]
if results:
- cover_filename = "File:" + results[0]["coverFilename"]["value"]
+ cover_filename = f"File:{results[0]['coverFilename']['value']}"
page_id = results[0]["pageId"]["value"]
else:
self._log.debug("wikipedia: album not found on dbpedia")
except (ValueError, KeyError, IndexError):
self._log.debug(
- "wikipedia: error scraping dbpedia response: {}",
- dbpedia_response.text,
+ "wikipedia: error scraping dbpedia response: {.text}",
+ dbpedia_response,
)
# Ensure we have a filename before attempting to query wikipedia
@@ -996,7 +996,7 @@ class Wikipedia(RemoteArtSource):
results = data["query"]["pages"][page_id]["images"]
for result in results:
if re.match(
- re.escape(lpart) + r".*?\." + re.escape(rpart),
+ rf"{re.escape(lpart)}.*?\.{re.escape(rpart)}",
result["title"],
):
cover_filename = result["title"]
@@ -1179,7 +1179,7 @@ class LastFM(RemoteArtSource):
if "error" in data:
if data["error"] == 6:
self._log.debug(
- "lastfm: no results for {}", album.mb_albumid
+ "lastfm: no results for {.mb_albumid}", album
)
else:
self._log.error(
@@ -1200,7 +1200,7 @@ class LastFM(RemoteArtSource):
url=images[size], size=self.SIZES[size]
)
except ValueError:
- self._log.debug("lastfm: error loading response: {}", response.text)
+ self._log.debug("lastfm: error loading response: {.text}", response)
return
@@ -1227,7 +1227,7 @@ class Spotify(RemoteArtSource):
paths: None | Sequence[bytes],
) -> Iterator[Candidate]:
try:
- url = self.SPOTIFY_ALBUM_URL + album.items().get().spotify_album_id
+ url = f"{self.SPOTIFY_ALBUM_URL}{album.items().get().spotify_album_id}"
except AttributeError:
self._log.debug("Fetchart: no Spotify album ID found")
return
@@ -1244,7 +1244,7 @@ class Spotify(RemoteArtSource):
soup = BeautifulSoup(html, "html.parser")
except ValueError:
self._log.debug(
- "Spotify: error loading response: {}", response.text
+ "Spotify: error loading response: {.text}", response
)
return
@@ -1541,9 +1541,7 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
out = candidate
assert out.path is not None # help mypy
self._log.debug(
- "using {0.LOC} image {1}",
- source,
- util.displayable_path(out.path),
+ "using {.LOC} image {.path}", source, out
)
break
# Remove temporary files for invalid candidates.
@@ -1576,7 +1574,7 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
message = ui.colorize(
"text_highlight_minor", "has album art"
)
- self._log.info("{0}: {1}", album, message)
+ self._log.info("{}: {}", album, message)
else:
# In ordinary invocations, look for images on the
# filesystem. When forcing, however, always go to the Web
@@ -1589,4 +1587,4 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
message = ui.colorize("text_success", "found album art")
else:
message = ui.colorize("text_error", "no art found")
- self._log.info("{0}: {1}", album, message)
+ self._log.info("{}: {}", album, message)
diff --git a/beetsplug/fish.py b/beetsplug/fish.py
index 4cf9b60a1..b1518f1c4 100644
--- a/beetsplug/fish.py
+++ b/beetsplug/fish.py
@@ -89,8 +89,9 @@ class FishPlugin(BeetsPlugin):
"-o",
"--output",
default="~/.config/fish/completions/beet.fish",
- help="where to save the script. default: "
- "~/.config/fish/completions",
+ help=(
+ "where to save the script. default: ~/.config/fish/completions"
+ ),
)
return [cmd]
@@ -122,23 +123,13 @@ class FishPlugin(BeetsPlugin):
for name in names:
cmd_names_help.append((name, cmd.help))
# Concatenate the string
- totstring = HEAD + "\n"
+ totstring = f"{HEAD}\n"
totstring += get_cmds_list([name[0] for name in cmd_names_help])
totstring += "" if nobasicfields else get_standard_fields(fields)
totstring += get_extravalues(lib, extravalues) if extravalues else ""
- totstring += (
- "\n"
- + "# ====== {} =====".format("setup basic beet completion")
- + "\n" * 2
- )
+ totstring += "\n# ====== setup basic beet completion =====\n\n"
totstring += get_basic_beet_options()
- totstring += (
- "\n"
- + "# ====== {} =====".format(
- "setup field completion for subcommands"
- )
- + "\n"
- )
+ totstring += "\n# ====== setup field completion for subcommands =====\n"
totstring += get_subcommands(cmd_names_help, nobasicfields, extravalues)
# Set up completion for all the command options
totstring += get_all_commands(beetcmds)
@@ -150,23 +141,19 @@ class FishPlugin(BeetsPlugin):
def _escape(name):
# Escape ? in fish
if name == "?":
- name = "\\" + name
+ name = f"\\{name}"
return name
def get_cmds_list(cmds_names):
# Make a list of all Beets core & plugin commands
- substr = ""
- substr += "set CMDS " + " ".join(cmds_names) + ("\n" * 2)
- return substr
+ return f"set CMDS {' '.join(cmds_names)}\n\n"
def get_standard_fields(fields):
# Make a list of album/track fields and append with ':'
- fields = (field + ":" for field in fields)
- substr = ""
- substr += "set FIELDS " + " ".join(fields) + ("\n" * 2)
- return substr
+ fields = (f"{field}:" for field in fields)
+ return f"set FIELDS {' '.join(fields)}\n\n"
def get_extravalues(lib, extravalues):
@@ -175,14 +162,8 @@ def get_extravalues(lib, extravalues):
word = ""
values_set = get_set_of_values_for_field(lib, extravalues)
for fld in extravalues:
- extraname = fld.upper() + "S"
- word += (
- "set "
- + extraname
- + " "
- + " ".join(sorted(values_set[fld]))
- + ("\n" * 2)
- )
+ extraname = f"{fld.upper()}S"
+ word += f"set {extraname} {' '.join(sorted(values_set[fld]))}\n\n"
return word
@@ -226,35 +207,29 @@ def get_subcommands(cmd_name_and_help, nobasicfields, extravalues):
for cmdname, cmdhelp in cmd_name_and_help:
cmdname = _escape(cmdname)
- word += (
- "\n"
- + "# ------ {} -------".format("fieldsetups for " + cmdname)
- + "\n"
- )
+ word += f"\n# ------ fieldsetups for {cmdname} -------\n"
word += BL_NEED2.format(
- ("-a " + cmdname), ("-f " + "-d " + wrap(clean_whitespace(cmdhelp)))
+ f"-a {cmdname}", f"-f -d {wrap(clean_whitespace(cmdhelp))}"
)
if nobasicfields is False:
word += BL_USE3.format(
cmdname,
- ("-a " + wrap("$FIELDS")),
- ("-f " + "-d " + wrap("fieldname")),
+ f"-a {wrap('$FIELDS')}",
+ f"-f -d {wrap('fieldname')}",
)
if extravalues:
for f in extravalues:
- setvar = wrap("$" + f.upper() + "S")
- word += (
- " ".join(
- BL_EXTRA3.format(
- (cmdname + " " + f + ":"),
- ("-f " + "-A " + "-a " + setvar),
- ("-d " + wrap(f)),
- ).split()
- )
- + "\n"
+ setvar = wrap(f"${f.upper()}S")
+ word += " ".join(
+ BL_EXTRA3.format(
+ f"{cmdname} {f}:",
+ f"-f -A -a {setvar}",
+ f"-d {wrap(f)}",
+ ).split()
)
+ word += "\n"
return word
@@ -267,59 +242,44 @@ def get_all_commands(beetcmds):
for name in names:
name = _escape(name)
- word += "\n"
- word += (
- ("\n" * 2)
- + "# ====== {} =====".format("completions for " + name)
- + "\n"
- )
+ word += f"\n\n\n# ====== completions for {name} =====\n"
for option in cmd.parser._get_all_options()[1:]:
cmd_l = (
- (" -l " + option._long_opts[0].replace("--", ""))
+ f" -l {option._long_opts[0].replace('--', '')}"
if option._long_opts
else ""
)
cmd_s = (
- (" -s " + option._short_opts[0].replace("-", ""))
+ f" -s {option._short_opts[0].replace('-', '')}"
if option._short_opts
else ""
)
cmd_need_arg = " -r " if option.nargs in [1] else ""
cmd_helpstr = (
- (" -d " + wrap(" ".join(option.help.split())))
+ f" -d {wrap(' '.join(option.help.split()))}"
if option.help
else ""
)
cmd_arglist = (
- (" -a " + wrap(" ".join(option.choices)))
+ f" -a {wrap(' '.join(option.choices))}"
if option.choices
else ""
)
- word += (
- " ".join(
- BL_USE3.format(
- name,
- (
- cmd_need_arg
- + cmd_s
- + cmd_l
- + " -f "
- + cmd_arglist
- ),
- cmd_helpstr,
- ).split()
- )
- + "\n"
+ word += " ".join(
+ BL_USE3.format(
+ name,
+ f"{cmd_need_arg}{cmd_s}{cmd_l} -f {cmd_arglist}",
+ cmd_helpstr,
+ ).split()
)
+ word += "\n"
- word = word + " ".join(
- BL_USE3.format(
- name,
- ("-s " + "h " + "-l " + "help" + " -f "),
- ("-d " + wrap("print help") + "\n"),
- ).split()
+ word = word + BL_USE3.format(
+ name,
+ "-s h -l help -f",
+ f"-d {wrap('print help')}",
)
return word
@@ -332,9 +292,9 @@ def clean_whitespace(word):
def wrap(word):
# Need " or ' around strings but watch out if they're in the string
sptoken = '"'
- if ('"') in word and ("'") in word:
+ if '"' in word and ("'") in word:
word.replace('"', sptoken)
- return '"' + word + '"'
+ return f'"{word}"'
tok = '"' if "'" in word else "'"
- return tok + word + tok
+ return f"{tok}{word}{tok}"
diff --git a/beetsplug/fromfilename.py b/beetsplug/fromfilename.py
index 103e82901..c3fb4bc6b 100644
--- a/beetsplug/fromfilename.py
+++ b/beetsplug/fromfilename.py
@@ -12,8 +12,8 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
-"""If the title is empty, try to extract track and title from the
-filename.
+"""If the title is empty, try to extract it from the filename
+(possibly also extract track and artist)
"""
import os
@@ -25,12 +25,12 @@ from beets.util import displayable_path
# Filename field extraction patterns.
PATTERNS = [
# Useful patterns.
- r"^(?P.+)[\-_](?P.+)[\-_](?P.*)$",
- r"^(?P