mirror of https://github.com/beetbox/beets.git

Merge branch 'master' into relative_to_take1

Commit f757a489b1: 185 changed files with 8437 additions and 10427 deletions
45  .git-blame-ignore-revs  Normal file

@@ -0,0 +1,45 @@
+# 2014
+# flake8-cleanliness in missing
+e21c04e9125a28ae0452374acf03d93315eb4381
+
+# 2016
+# Removed unicode_literals from library, logging and mediafile
+43572f50b0eb3522239d94149d91223e67d9a009
+# Removed unicode_literals from plugins
+53d2c8d9db87be4d4750ad879bf46176537be73f
+# reformat flake8 errors
+1db46dfeb6607c164afb247d8da82443677795c1
+
+# 2021
+# pyupgrade root
+e26276658052947e9464d9726b703335304c7c13
+# pyupgrade beets dir
+6d1316f463cb7c9390f85bf35b220e250a35004a
+# pyupgrade autotag dir
+f8b8938fd8bbe91898d0982552bc75d35703d3ef
+# pyupgrade dbcore dir
+d288f872903c79a7ee7c5a7c9cc690809441196e
+# pyupgrade ui directory
+432fa557258d9ff01e23ed750f9a86a96239599e
+# pyupgrade util dir
+af102c3e2f1c7a49e99839e2825906fe01780eec
+# fix unused import and flake8
+910354a6c617ed5aa643cff666205b43e1557373
+# pyupgrade beetsplug and tests
+1ec87a3bdd737abe46c6e614051bf9e314db4619
+
+# 2022
+# Reformat flake8 config comments
+abc3dfbf429b179fac25bd1dff72d577cd4d04c7
+
+# 2023
+# Apply formatting tools to all files
+a6e5201ff3fad4c69bf24d17bace2ef744b9f51b
+
+# 2024
+# Reformat the codebase
+85a17ee5039628a6f3cdcb7a03d7d1bd530fbe89
+# Fix lint issues
+f36bc497c8c8f89004f3f6879908d3f0b25123e1
+# Remove some lint exclusions and fix the issues
+5f78d1b82b2292d5ce0c99623ba0ec444b80d24c
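A note for readers: this file only takes effect once blame is pointed at it, for example with `git config blame.ignoreRevsFile .git-blame-ignore-revs`. After that, the bulk-formatting commits listed above stop showing up as the last-touch authors in `git blame` output.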
15  .github/workflows/ci.yaml  vendored

@@ -14,10 +14,10 @@ jobs:
       fail-fast: false
       matrix:
         platform: [ubuntu-latest, windows-latest]
-        python-version: ["3.8", "3.9"]
+        python-version: ["3.9"]
     runs-on: ${{ matrix.platform }}
     env:
-      IS_MAIN_PYTHON: ${{ matrix.python-version == '3.8' && matrix.platform == 'ubuntu-latest' }}
+      IS_MAIN_PYTHON: ${{ matrix.python-version == '3.9' && matrix.platform == 'ubuntu-latest' }}
     steps:
       - uses: actions/checkout@v4
       - name: Install Python tools
@@ -29,15 +29,16 @@ jobs:
           python-version: ${{ matrix.python-version }}
           cache: poetry

-      - name: Install PyGobject dependencies on Ubuntu
+      - name: Install PyGobject and release script dependencies on Ubuntu
        if: matrix.platform == 'ubuntu-latest'
        run: |
          sudo apt update
-         sudo apt install ffmpeg gobject-introspection libgirepository1.0-dev
-         poetry install --extras replaygain
+         sudo apt install ffmpeg gobject-introspection libgirepository1.0-dev pandoc
+         poetry install --with=release --extras=docs --extras=replaygain --extras=reflink
+         poe docs

       - name: Install Python dependencies
-        run: poetry install --only=main,test
+        run: poetry install --only=main,test --extras=autobpm

       - if: ${{ env.IS_MAIN_PYTHON != 'true' }}
         name: Test without coverage
@@ -75,4 +76,4 @@ jobs:
         uses: codecov/codecov-action@v4
         with:
           files: ./coverage.xml
-          use_oidc: true
+          use_oidc: ${{ !(github.event_name == 'pull_request' && github.event.pull_request.head.repo.fork) }}
13  .github/workflows/lint.yml  vendored

@@ -7,7 +7,7 @@ on:
       - master

 env:
-  PYTHON_VERSION: 3.8
+  PYTHON_VERSION: 3.9

 jobs:
   changed-files:
@@ -60,7 +60,7 @@ jobs:
           cache: poetry

       - name: Install dependencies
-        run: poetry install --only=format
+        run: poetry install --only=lint

       - name: Check code formatting
         # the job output will contain colored diffs with what needs adjusting
@@ -84,10 +84,7 @@ jobs:
         run: poetry install --only=lint

       - name: Lint code
-        uses: liskin/gh-problem-matcher-wrap@v3
-        with:
-          linters: flake8
-          run: poe lint ${{ needs.changed-files.outputs.changed_python_files }}
+        run: poe lint --output-format=github ${{ needs.changed-files.outputs.changed_python_files }}

   mypy:
     if: needs.changed-files.outputs.any_python_changed == 'true'
@@ -128,13 +125,13 @@ jobs:
           cache: poetry

       - name: Install dependencies
-        run: poetry install --only=docs
+        run: poetry install --extras=docs

       - name: Add Sphinx problem matcher
         run: echo "::add-matcher::.github/sphinx-problem-matcher.json"

       - name: Build docs
-        run: |
+        run: |-
           poe docs |& tee /tmp/output
           # fail the job if there are issues
           grep -q " WARNING:" /tmp/output && exit 1 || exit 0
42  .github/workflows/make_release.yaml  vendored

@@ -10,10 +10,11 @@ on:
 env:
-  PYTHON_VERSION: 3.8
+  NEW_VERSION: ${{ inputs.version }}
+  NEW_TAG: v${{ inputs.version }}

 jobs:
   increment-version:
-    name: Bump project version and commit it
+    name: Bump version, commit and create tag
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v4
@@ -28,20 +29,26 @@ jobs:
         run: poetry install --only=release

       - name: Bump project version
-        id: script
         run: poe bump "${{ env.NEW_VERSION }}"

       - uses: EndBug/add-and-commit@v9
-        name: Commit the changes
+        id: commit_and_tag
+        name: Commit the changes and create tag
         with:
           message: "Increment version to ${{ env.NEW_VERSION }}"
+          tag: "${{ env.NEW_TAG }} --force"

   build:
+    name: Get changelog and build the distribution package
     runs-on: ubuntu-latest
     needs: increment-version
+    outputs:
+      changelog: ${{ steps.generate_changelog.outputs.changelog }}
     steps:
       - uses: actions/checkout@v4
+        with:
+          ref: ${{ env.NEW_TAG }}

       - name: Install Python tools
         uses: BrandonLWhite/pipx-install-action@v0.1.1
       - uses: actions/setup-python@v5
@@ -50,16 +57,23 @@ jobs:
           cache: poetry

       - name: Install dependencies
-        run: poetry install --only=release
+        run: poetry install --with=release --extras=docs
+
+      - name: Install pandoc
+        run: sudo apt update && sudo apt install pandoc -y

       - name: Obtain the changelog
-        run: echo "changelog=$(poe changelog)" >> $GITHUB_OUTPUT
+        id: generate_changelog
+        run: |
+          poe docs
+          {
+            echo 'changelog<<EOF'
+            poe --quiet changelog
+            echo EOF
+          } >> "$GITHUB_OUTPUT"

       - name: Build a binary wheel and a source tarball
-        run: poetry build
+        run: poe build

       - name: Store the distribution packages
         uses: actions/upload-artifact@v4
@@ -88,19 +102,12 @@ jobs:
   make-github-release:
     name: Create GitHub release
     runs-on: ubuntu-latest
-    needs: publish-to-pypi
+    needs: [build, publish-to-pypi]
+    env:
+      CHANGELOG: ${{ needs.build.outputs.changelog }}
     steps:
-      - name: Tag the commit
-        id: tag_version
-        uses: mathieudutour/github-tag-action@v6
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          custom_tag: ${{ env.NEW_VERSION }}
-
       - name: Download all the dists
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v4
         with:
           name: python-package-distributions
           path: dist/
@@ -108,8 +115,6 @@ jobs:
       - name: Create a GitHub release
         id: make_release
         uses: ncipollo/release-action@v1
-        env:
-          NEW_TAG: ${{ steps.tag_version.outputs.new_tag }}
         with:
           tag: ${{ env.NEW_TAG }}
           name: Release ${{ env.NEW_TAG }}
@@ -117,7 +122,8 @@ jobs:
           artifacts: dist/*
       - name: Send release toot to Fosstodon
         uses: cbrgm/mastodon-github-action@v2
+        continue-on-error: true
         with:
           access-token: ${{ secrets.MASTODON_ACCESS_TOKEN }}
           url: ${{ secrets.MASTODON_URL }}
-          message: "Version ${{ steps.tag_version.outputs.new_tag }} of beets has been released! Check out all of the new changes at ${{ steps.create_release.outputs.html_url }}"
+          message: "Version ${{ env.NEW_TAG }} of beets has been released! Check out all of the new changes at ${{ steps.make_release.outputs.html_url }}"
3  .gitignore  vendored

@@ -91,3 +91,6 @@ ENV/
 /.pydevproject
 /.settings
 .vscode
+
+# pyright
+pyrightconfig.json
.pre-commit-config.yaml

@@ -2,13 +2,7 @@
 # See https://pre-commit.com/hooks.html for more hooks

 repos:
-  - repo: https://github.com/psf/black
-    rev: 24.2.0
+  - repo: https://github.com/astral-sh/ruff-pre-commit
+    rev: v0.8.1
     hooks:
-      - id: black
-
-  - repo: https://github.com/pycqa/isort
-    rev: 5.13.2
-    hooks:
-      - id: isort
-        name: isort (python)
+      - id: ruff-format
CONTRIBUTING.rst

@@ -118,10 +118,10 @@ command. Instead, you can activate the virtual environment in your shell with::

     $ poetry shell

-You should see ``(beets-py38)`` prefix in your shell prompt. Now you can run
+You should see ``(beets-py3.9)`` prefix in your shell prompt. Now you can run
 commands directly, for example::

-    $ (beets-py38) pytest
+    $ (beets-py3.9) pytest

 Additionally, `poethepoet`_ task runner assists us with the most common
 operations. Formatting, linting, testing are defined as ``poe`` tasks in
@@ -237,7 +237,7 @@ There are a few coding conventions we use in beets:
 .. code-block:: python

     with g.lib.transaction() as tx:
-        rows = tx.query('SELECT DISTINCT "{0}" FROM "{1}" ORDER BY "{2}"'
+        rows = tx.query("SELECT DISTINCT '{0}' FROM '{1}' ORDER BY '{2}'"
                         .format(field, model._table, sort_field))

 To fetch Item objects from the database, use lib.items(…) and supply
@@ -248,7 +248,7 @@ There are a few coding conventions we use in beets:
 .. code-block:: python

     with lib.transaction() as tx:
-        rows = tx.query('SELECT …')
+        rows = tx.query("SELECT …")

 Transaction objects help control concurrent access to the database
 and assist in debugging conflicting accesses.
@@ -274,14 +274,13 @@ There are a few coding conventions we use in beets:
 Style
 -----

-We follow `black`_ formatting and `google's docstring format`_.
+We use `ruff`_ to format and lint the codebase.

-Use ``poe check-format`` and ``poe lint`` to check your code for style and
+Run ``poe check-format`` and ``poe lint`` to check your code for style and
 linting errors. Running ``poe format`` will automatically format your code
 according to the specifications required by the project.

-.. _black: https://black.readthedocs.io/en/stable/
-.. _google's docstring format: https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings
+.. _ruff: https://docs.astral.sh/ruff/

 Handling Paths
 --------------
@@ -345,10 +344,10 @@ environment variable ``SKIP_SLOW_TESTS``, for example::

 Coverage
 ^^^^^^^^

-Coverage is measured automatically when running the tests. If you find it takes
-a while to calculate, disable it::
+The ``test`` command does not include coverage as it slows down testing. In
+order to measure it, use the ``test-with-coverage`` task

-    $ poe test --no-cov
+    $ poe test-with-coverage [pytest options]
+
+You are welcome to explore coverage by opening the HTML report in
+``.reports/html/index.html``.
@@ -379,28 +378,24 @@ Writing Tests

 Writing tests is done by adding or modifying files in folder `test`_.
 Take a look at
 `https://github.com/beetbox/beets/blob/master/test/test_template.py#L224`_
-to get a basic view on how tests are written. We currently allow writing
-tests with either `unittest`_ or `pytest`_.
+to get a basic view on how tests are written. Since we are currently migrating
+the tests from `unittest`_ to `pytest`_, new tests should be written using
+`pytest`_. Contributions migrating existing tests are welcome!

-Any tests that involve sending out network traffic e.g. an external API
-call, should be skipped normally and run under our weekly `integration
-test`_ suite. These tests can be useful in detecting external changes
-that would affect ``beets``. In order to do this, simply add the
-following snippet before the applicable test case:
+External API requests under test should be mocked with `requests_mock`_,
+However, we still want to know whether external APIs are up and that they
+return expected responses, therefore we test them weekly with our `integration
+test`_ suite.
+
+In order to add such a test, mark your test with the ``integration_test`` marker

 .. code-block:: python

-    @unittest.skipUnless(
-        os.environ.get('INTEGRATION_TEST', '0') == '1',
-        'integration testing not enabled')
+    @pytest.mark.integration_test
     def test_external_api_call():
         ...

-If you do this, it is also advised to create a similar test that 'mocks'
-the network call and can be run under normal circumstances by our CI and
-others. See `unittest.mock`_ for more info.
-
-- **AVOID** using the ``start()`` and ``stop()`` methods of
-  ``mock.patch``, as they require manual cleanup. Use the annotation or
-  context manager forms instead.
+This way, the test will be run only in the integration test suite.

 .. _Codecov: https://codecov.io/github/beetbox/beets
 .. _pytest-random: https://github.com/klrmn/pytest-random
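As an aside, a minimal sketch of the `requests_mock` pytest fixture that the new text recommends for mocking external API calls. The endpoint URL and payload here are made up purely for illustration:

    import requests

    def test_fetches_album(requests_mock):
        # requests-mock's pytest fixture intercepts HTTP calls made
        # through the requests library for the duration of the test.
        requests_mock.get(
            "https://api.example.com/album/1", json={"title": "Demo"}
        )
        resp = requests.get("https://api.example.com/album/1")
        assert resp.json()["title"] == "Demo"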
@@ -410,6 +405,6 @@ others. See `unittest.mock`_ for more info.
 .. _`https://github.com/beetbox/beets/blob/master/test/test_template.py#L224`: https://github.com/beetbox/beets/blob/master/test/test_template.py#L224
 .. _unittest: https://docs.python.org/3/library/unittest.html
 .. _integration test: https://github.com/beetbox/beets/actions?query=workflow%3A%22integration+tests%22
-.. _unittest.mock: https://docs.python.org/3/library/unittest.mock.html
+.. _requests-mock: https://requests-mock.readthedocs.io/en/latest/response.html
 .. _documentation: https://beets.readthedocs.io/en/stable/
 .. _vim: https://www.vim.org/
36  MANIFEST.in  (deleted)

@@ -1,36 +0,0 @@
-# Include tests (but avoid including *.pyc, etc.)
-prune test
-recursive-include test/rsrc *
-recursive-exclude test/rsrc *.pyc
-recursive-exclude test/rsrc *.pyo
-include test/*.py
-
-# Include relevant text files.
-include LICENSE README.rst
-# And generated manpages.
-include man/beet.1
-include man/beetsconfig.5
-
-# Include the Sphinx documentation.
-recursive-include docs *.rst *.py Makefile *.png
-prune docs/_build
-
-# Resources for web plugin.
-recursive-include beetsplug/web/templates *
-recursive-include beetsplug/web/static *
-
-# And for the lastgenre plugin.
-include beetsplug/lastgenre/genres.txt
-include beetsplug/lastgenre/genres-tree.yaml
-
-# Exclude junk.
-global-exclude .DS_Store
-
-# Include default config
-include beets/config_default.yaml
-
-# Shell completion template
-include beets/ui/completion_base.sh
-
-# Include extra bits
-recursive-include extra *
beets/__init__.py

@@ -17,7 +17,7 @@ from sys import stderr

 import confuse

-__version__ = "2.0.0"
+__version__ = "2.2.0"
 __author__ = "Adrian Sampson <adrian@radbox.org>"
beets/__main__.py

@@ -16,7 +16,6 @@
 `python -m beets`.
 """
-

 import sys

 from .ui import main
beets/art.py

@@ -16,7 +16,6 @@
 music and items' embedded album art.
 """
-

 import os
 from tempfile import NamedTemporaryFile
beets/autotag/__init__.py

@@ -12,23 +12,41 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-"""Facilities for automatically determining files' correct metadata.
-"""
-from typing import Mapping
+"""Facilities for automatically determining files' correct metadata."""
+
+from collections.abc import Mapping, Sequence
+from typing import Union

 from beets import config, logging
-from beets.library import Item
+from beets.library import Album, Item, LibModel
+from beets.util import unique_list

-# Parts of external interface.
-from .hooks import (  # noqa
-    AlbumInfo,
-    AlbumMatch,
-    Distance,
-    TrackInfo,
-    TrackMatch,
+from .hooks import AlbumInfo, AlbumMatch, Distance, TrackInfo, TrackMatch
+from .match import (
+    Proposal,
+    Recommendation,
+    current_metadata,
+    tag_album,
+    tag_item,
 )
-from .match import Recommendation  # noqa
-from .match import Proposal, current_metadata, tag_album, tag_item  # noqa
+
+__all__ = [
+    "AlbumInfo",
+    "AlbumMatch",
+    "Distance",
+    "TrackInfo",
+    "TrackMatch",
+    "Proposal",
+    "Recommendation",
+    "apply_album_metadata",
+    "apply_item_metadata",
+    "apply_metadata",
+    "current_metadata",
+    "tag_album",
+    "tag_item",
+]

 # Global logger.
 log = logging.getLogger("beets")
@@ -80,6 +98,71 @@ SPECIAL_FIELDS = {
 # Additional utilities for the main interface.


+def _apply_metadata(
+    info: Union[AlbumInfo, TrackInfo],
+    db_obj: Union[Album, Item],
+    nullable_fields: Sequence[str] = [],
+):
+    """Set the db_obj's metadata to match the info."""
+    special_fields = SPECIAL_FIELDS[
+        "album" if isinstance(info, AlbumInfo) else "track"
+    ]
+
+    for field, value in info.items():
+        # We only overwrite fields that are not already hardcoded.
+        if field in special_fields:
+            continue
+
+        # Don't overwrite fields with empty values unless the
+        # field is explicitly allowed to be overwritten.
+        if value is None and field not in nullable_fields:
+            continue
+
+        db_obj[field] = value
+
+
+def correct_list_fields(m: LibModel) -> None:
+    """Synchronise single and list values for the list fields that we use.
+
+    That is, ensure the same value in the single field and the first element
+    in the list.
+
+    For context, the value we set as, say, ``mb_artistid`` is simply ignored:
+    Under the current :class:`MediaFile` implementation, fields ``albumtype``,
+    ``mb_artistid`` and ``mb_albumartistid`` are mapped to the first element of
+    ``albumtypes``, ``mb_artistids`` and ``mb_albumartistids`` respectively.
+
+    This means setting ``mb_artistid`` has no effect. However, beets
+    functionality still assumes that ``mb_artistid`` is independent and stores
+    its value in the database. If ``mb_artistid`` != ``mb_artistids[0]``,
+    ``beet write`` command thinks that ``mb_artistid`` is modified and tries to
+    update the field in the file. Of course nothing happens, so the same diff
+    is shown every time the command is run.
+
+    We can avoid this issue by ensuring that ``mb_artistid`` has the same value
+    as ``mb_artistids[0]``, and that's what this function does.
+
+    Note: :class:`Album` model does not have ``mb_artistids`` and
+    ``mb_albumartistids`` fields therefore we need to check for their presence.
+    """

+    def ensure_first_value(single_field: str, list_field: str) -> None:
+        """Ensure the first ``list_field`` item is equal to ``single_field``."""
+        single_val, list_val = getattr(m, single_field), getattr(m, list_field)
+        if single_val:
+            setattr(m, list_field, unique_list([single_val, *list_val]))
+        elif list_val:
+            setattr(m, single_field, list_val[0])
+
+    ensure_first_value("albumtype", "albumtypes")
+
+    if hasattr(m, "mb_artistids"):
+        ensure_first_value("mb_artistid", "mb_artistids")
+
+    if hasattr(m, "mb_albumartistids"):
+        ensure_first_value("mb_albumartistid", "mb_albumartistids")
+
+
 def apply_item_metadata(item: Item, track_info: TrackInfo):
     """Set an item's metadata from its matched TrackInfo object."""
     item.artist = track_info.artist
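To make `ensure_first_value` concrete: `beets.util.unique_list` (imported at the top of this file) is used as an order-preserving de-duplicator, so promoting the single-valued field to the head of the list cannot introduce a duplicate. A rough stand-in with that assumed behaviour:

    def unique_list(values):
        # Order-preserving dedup; stand-in for beets.util.unique_list.
        seen = set()
        return [v for v in values if not (v in seen or seen.add(v))]

    # Promoting "id-1" to the front leaves the rest of the list intact.
    print(unique_list(["id-1", "id-1", "id-2"]))  # -> ['id-1', 'id-2']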
@@ -96,18 +179,19 @@ def apply_item_metadata(item: Item, track_info: TrackInfo):
     if track_info.artists_ids:
         item.mb_artistids = track_info.artists_ids

-    for field, value in track_info.items():
-        # We only overwrite fields that are not already hardcoded.
-        if field in SPECIAL_FIELDS["track"]:
-            continue
-        if value is None:
-            continue
-        item[field] = value
+    _apply_metadata(track_info, item)
+    correct_list_fields(item)

     # At the moment, the other metadata is left intact (including album
     # and track number). Perhaps these should be emptied?


+def apply_album_metadata(album_info: AlbumInfo, album: Album):
+    """Set the album's metadata to match the AlbumInfo object."""
+    _apply_metadata(album_info, album)
+    correct_list_fields(album)
+
+
 def apply_metadata(album_info: AlbumInfo, mapping: Mapping[Item, TrackInfo]):
     """Set the items' metadata to match an AlbumInfo object using a
     mapping from Items to TrackInfo objects.
@@ -218,21 +302,16 @@ def apply_metadata(album_info: AlbumInfo, mapping: Mapping[Item, TrackInfo]):
         # Track alt.
         item.track_alt = track_info.track_alt

-        # Don't overwrite fields with empty values unless the
-        # field is explicitly allowed to be overwritten
-        for field, value in album_info.items():
-            if field in SPECIAL_FIELDS["album"]:
-                continue
-            clobber = field in config["overwrite_null"]["album"].as_str_seq()
-            if value is None and not clobber:
-                continue
-            item[field] = value
+        _apply_metadata(
+            album_info,
+            item,
+            nullable_fields=config["overwrite_null"]["album"].as_str_seq(),
+        )

-        for field, value in track_info.items():
-            if field in SPECIAL_FIELDS["track"]:
-                continue
-            clobber = field in config["overwrite_null"]["track"].as_str_seq()
-            value = getattr(track_info, field)
-            if value is None and not clobber:
-                continue
-            item[field] = value
+        _apply_metadata(
+            track_info,
+            item,
+            nullable_fields=config["overwrite_null"]["track"].as_str_seq(),
+        )
+
+        correct_list_fields(item)
beets/autotag/hooks.py

@@ -17,37 +17,28 @@
 from __future__ import annotations

 import re
-from collections import namedtuple
 from functools import total_ordering
-from typing import (
-    Any,
-    Callable,
-    Dict,
-    Iterable,
-    Iterator,
-    List,
-    Optional,
-    Tuple,
-    TypeVar,
-    Union,
-    cast,
-)
+from typing import TYPE_CHECKING, Any, Callable, NamedTuple, TypeVar, cast

 from jellyfish import levenshtein_distance
 from unidecode import unidecode

 from beets import config, logging, plugins
 from beets.autotag import mb
-from beets.library import Item
 from beets.util import as_string, cached_classproperty

+if TYPE_CHECKING:
+    from collections.abc import Iterable, Iterator
+
+    from beets.library import Item
+
 log = logging.getLogger("beets")

 V = TypeVar("V")


 # Classes used to represent candidate options.
-class AttrDict(Dict[str, V]):
+class AttrDict(dict[str, V]):
     """A dictionary that supports attribute ("dot") access, so `d.field`
     is equivalent to `d['field']`.
     """
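The switch from `Optional[...]`/`Union[...]` to `X | None` throughout this file leans on the `from __future__ import annotations` line kept above: with deferred evaluation, annotations are stored as strings and never executed, so PEP 604 syntax is safe on the Python 3.9 floor this PR adopts, even though a bare `str | None` expression would otherwise need 3.10. A minimal standalone illustration:

    from __future__ import annotations


    def first_or_none(items: list[str]) -> str | None:
        # The annotations above are never evaluated at runtime, so this
        # function works fine on Python 3.9.
        return items[0] if items else None


    print(first_or_none(["a", "b"]))  # -> a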
@@ -82,47 +73,47 @@ class AlbumInfo(AttrDict):
     # TYPING: are all of these correct? I've assumed optional strings
     def __init__(
         self,
-        tracks: List[TrackInfo],
-        album: Optional[str] = None,
-        album_id: Optional[str] = None,
-        artist: Optional[str] = None,
-        artist_id: Optional[str] = None,
-        artists: Optional[List[str]] = None,
-        artists_ids: Optional[List[str]] = None,
-        asin: Optional[str] = None,
-        albumtype: Optional[str] = None,
-        albumtypes: Optional[List[str]] = None,
+        tracks: list[TrackInfo],
+        album: str | None = None,
+        album_id: str | None = None,
+        artist: str | None = None,
+        artist_id: str | None = None,
+        artists: list[str] | None = None,
+        artists_ids: list[str] | None = None,
+        asin: str | None = None,
+        albumtype: str | None = None,
+        albumtypes: list[str] | None = None,
         va: bool = False,
-        year: Optional[int] = None,
-        month: Optional[int] = None,
-        day: Optional[int] = None,
-        label: Optional[str] = None,
-        barcode: Optional[str] = None,
-        mediums: Optional[int] = None,
-        artist_sort: Optional[str] = None,
-        artists_sort: Optional[List[str]] = None,
-        releasegroup_id: Optional[str] = None,
-        release_group_title: Optional[str] = None,
-        catalognum: Optional[str] = None,
-        script: Optional[str] = None,
-        language: Optional[str] = None,
-        country: Optional[str] = None,
-        style: Optional[str] = None,
-        genre: Optional[str] = None,
-        albumstatus: Optional[str] = None,
-        media: Optional[str] = None,
-        albumdisambig: Optional[str] = None,
-        releasegroupdisambig: Optional[str] = None,
-        artist_credit: Optional[str] = None,
-        artists_credit: Optional[List[str]] = None,
-        original_year: Optional[int] = None,
-        original_month: Optional[int] = None,
-        original_day: Optional[int] = None,
-        data_source: Optional[str] = None,
-        data_url: Optional[str] = None,
-        discogs_albumid: Optional[str] = None,
-        discogs_labelid: Optional[str] = None,
-        discogs_artistid: Optional[str] = None,
+        year: int | None = None,
+        month: int | None = None,
+        day: int | None = None,
+        label: str | None = None,
+        barcode: str | None = None,
+        mediums: int | None = None,
+        artist_sort: str | None = None,
+        artists_sort: list[str] | None = None,
+        releasegroup_id: str | None = None,
+        release_group_title: str | None = None,
+        catalognum: str | None = None,
+        script: str | None = None,
+        language: str | None = None,
+        country: str | None = None,
+        style: str | None = None,
+        genre: str | None = None,
+        albumstatus: str | None = None,
+        media: str | None = None,
+        albumdisambig: str | None = None,
+        releasegroupdisambig: str | None = None,
+        artist_credit: str | None = None,
+        artists_credit: list[str] | None = None,
+        original_year: int | None = None,
+        original_month: int | None = None,
+        original_day: int | None = None,
+        data_source: str | None = None,
+        data_url: str | None = None,
+        discogs_albumid: str | None = None,
+        discogs_labelid: str | None = None,
+        discogs_artistid: str | None = None,
         **kwargs,
     ):
         self.album = album
@@ -168,42 +159,6 @@ class AlbumInfo(AttrDict):
         self.discogs_artistid = discogs_artistid
         self.update(kwargs)

-    # Work around a bug in python-musicbrainz-ngs that causes some
-    # strings to be bytes rather than Unicode.
-    # https://github.com/alastair/python-musicbrainz-ngs/issues/85
-    def decode(self, codec: str = "utf-8"):
-        """Ensure that all string attributes on this object, and the
-        constituent `TrackInfo` objects, are decoded to Unicode.
-        """
-        for fld in [
-            "album",
-            "artist",
-            "albumtype",
-            "label",
-            "barcode",
-            "artist_sort",
-            "catalognum",
-            "script",
-            "language",
-            "country",
-            "style",
-            "genre",
-            "albumstatus",
-            "albumdisambig",
-            "releasegroupdisambig",
-            "artist_credit",
-            "media",
-            "discogs_albumid",
-            "discogs_labelid",
-            "discogs_artistid",
-        ]:
-            value = getattr(self, fld)
-            if isinstance(value, bytes):
-                setattr(self, fld, value.decode(codec, "ignore"))
-
-        for track in self.tracks:
-            track.decode(codec)
-
     def copy(self) -> AlbumInfo:
         dupe = AlbumInfo([])
         dupe.update(self)
@@ -226,38 +181,38 @@ class TrackInfo(AttrDict):
     # TYPING: are all of these correct? I've assumed optional strings
     def __init__(
         self,
-        title: Optional[str] = None,
-        track_id: Optional[str] = None,
-        release_track_id: Optional[str] = None,
-        artist: Optional[str] = None,
-        artist_id: Optional[str] = None,
-        artists: Optional[List[str]] = None,
-        artists_ids: Optional[List[str]] = None,
-        length: Optional[float] = None,
-        index: Optional[int] = None,
-        medium: Optional[int] = None,
-        medium_index: Optional[int] = None,
-        medium_total: Optional[int] = None,
-        artist_sort: Optional[str] = None,
-        artists_sort: Optional[List[str]] = None,
-        disctitle: Optional[str] = None,
-        artist_credit: Optional[str] = None,
-        artists_credit: Optional[List[str]] = None,
-        data_source: Optional[str] = None,
-        data_url: Optional[str] = None,
-        media: Optional[str] = None,
-        lyricist: Optional[str] = None,
-        composer: Optional[str] = None,
-        composer_sort: Optional[str] = None,
-        arranger: Optional[str] = None,
-        track_alt: Optional[str] = None,
-        work: Optional[str] = None,
-        mb_workid: Optional[str] = None,
-        work_disambig: Optional[str] = None,
-        bpm: Optional[str] = None,
-        initial_key: Optional[str] = None,
-        genre: Optional[str] = None,
-        album: Optional[str] = None,
+        title: str | None = None,
+        track_id: str | None = None,
+        release_track_id: str | None = None,
+        artist: str | None = None,
+        artist_id: str | None = None,
+        artists: list[str] | None = None,
+        artists_ids: list[str] | None = None,
+        length: float | None = None,
+        index: int | None = None,
+        medium: int | None = None,
+        medium_index: int | None = None,
+        medium_total: int | None = None,
+        artist_sort: str | None = None,
+        artists_sort: list[str] | None = None,
+        disctitle: str | None = None,
+        artist_credit: str | None = None,
+        artists_credit: list[str] | None = None,
+        data_source: str | None = None,
+        data_url: str | None = None,
+        media: str | None = None,
+        lyricist: str | None = None,
+        composer: str | None = None,
+        composer_sort: str | None = None,
+        arranger: str | None = None,
+        track_alt: str | None = None,
+        work: str | None = None,
+        mb_workid: str | None = None,
+        work_disambig: str | None = None,
+        bpm: str | None = None,
+        initial_key: str | None = None,
+        genre: str | None = None,
+        album: str | None = None,
         **kwargs,
     ):
         self.title = title
@@ -294,24 +249,6 @@ class TrackInfo(AttrDict):
         self.album = album
         self.update(kwargs)

-    # As above, work around a bug in python-musicbrainz-ngs.
-    def decode(self, codec="utf-8"):
-        """Ensure that all string attributes on this object are decoded
-        to Unicode.
-        """
-        for fld in [
-            "title",
-            "artist",
-            "medium",
-            "artist_sort",
-            "disctitle",
-            "artist_credit",
-            "media",
-        ]:
-            value = getattr(self, fld)
-            if isinstance(value, bytes):
-                setattr(self, fld, value.decode(codec, "ignore"))
-
     def copy(self) -> TrackInfo:
         dupe = TrackInfo()
         dupe.update(self)
@@ -355,7 +292,7 @@ def _string_dist_basic(str1: str, str2: str) -> float:
     return levenshtein_distance(str1, str2) / float(max(len(str1), len(str2)))


-def string_dist(str1: Optional[str], str2: Optional[str]) -> float:
+def string_dist(str1: str | None, str2: str | None) -> float:
     """Gives an "intuitive" edit distance between two strings. This is
     an edit distance, normalized by the string length, with a number of
     tweaks that reflect intuition about text.
@@ -422,10 +359,10 @@ class Distance:

     def __init__(self):
         self._penalties = {}
-        self.tracks: Dict[TrackInfo, Distance] = {}
+        self.tracks: dict[TrackInfo, Distance] = {}

     @cached_classproperty
-    def _weights(cls) -> Dict[str, float]:  # noqa: N805
+    def _weights(cls) -> dict[str, float]:
         """A dictionary from keys to floating-point weights."""
         weights_view = config["match"]["distance_weights"]
         weights = {}
@@ -461,7 +398,7 @@ class Distance:
             dist_raw += sum(penalty) * self._weights[key]
         return dist_raw

-    def items(self) -> List[Tuple[str, float]]:
+    def items(self) -> list[tuple[str, float]]:
         """Return a list of (key, dist) pairs, with `dist` being the
         weighted distance, sorted from highest to lowest. Does not
         include penalties with a zero value.
@@ -511,16 +448,16 @@ class Distance:
             return dist / dist_max
         return 0.0

-    def __iter__(self) -> Iterator[Tuple[str, float]]:
+    def __iter__(self) -> Iterator[tuple[str, float]]:
         return iter(self.items())

     def __len__(self) -> int:
         return len(self.items())

-    def keys(self) -> List[str]:
+    def keys(self) -> list[str]:
         return [key for key, _ in self.items()]

-    def update(self, dist: "Distance"):
+    def update(self, dist: Distance):
         """Adds all the distance penalties from `dist`."""
         if not isinstance(dist, Distance):
             raise ValueError(
@@ -531,7 +468,7 @@ class Distance:

     # Adding components.

-    def _eq(self, value1: Union[re.Pattern[str], Any], value2: Any) -> bool:
+    def _eq(self, value1: re.Pattern[str] | Any, value2: Any) -> bool:
         """Returns True if `value1` is equal to `value2`. `value1` may
         be a compiled regular expression, in which case it will be
         matched against `value2`.
@@ -555,7 +492,7 @@ class Distance:
         self,
         key: str,
         value: Any,
-        options: Union[List[Any], Tuple[Any, ...], Any],
+        options: list[Any] | tuple[Any, ...] | Any,
     ):
         """Adds a distance penalty of 1.0 if `value` doesn't match any
         of the values in `options`. If an option is a compiled regular
@@ -598,7 +535,7 @@ class Distance:
         self,
         key: str,
         value: Any,
-        options: Union[List[Any], Tuple[Any, ...], Any],
+        options: list[Any] | tuple[Any, ...] | Any,
     ):
         """Adds a distance penalty that corresponds to the position at
         which `value` appears in `options`. A distance penalty of 0.0
@@ -620,8 +557,8 @@ class Distance:
     def add_ratio(
         self,
         key: str,
-        number1: Union[int, float],
-        number2: Union[int, float],
+        number1: int | float,
+        number2: int | float,
     ):
         """Adds a distance penalty for `number1` as a ratio of `number2`.
         `number1` is bound at 0 and `number2`.
@@ -633,7 +570,7 @@ class Distance:
             dist = 0.0
         self.add(key, dist)

-    def add_string(self, key: str, str1: Optional[str], str2: Optional[str]):
+    def add_string(self, key: str, str1: str | None, str2: str | None):
         """Adds a distance penalty based on the edit distance between
         `str1` and `str2`.
         """
@@ -643,17 +580,24 @@ class Distance:

 # Structures that compose all the information for a candidate match.

-AlbumMatch = namedtuple(
-    "AlbumMatch", ["distance", "info", "mapping", "extra_items", "extra_tracks"]
-)
-
-TrackMatch = namedtuple("TrackMatch", ["distance", "info"])
+class AlbumMatch(NamedTuple):
+    distance: Distance
+    info: AlbumInfo
+    mapping: dict[Item, TrackInfo]
+    extra_items: list[Item]
+    extra_tracks: list[TrackInfo]
+
+
+class TrackMatch(NamedTuple):
+    distance: Distance
+    info: TrackInfo


 # Aggregation of sources.


-def album_for_mbid(release_id: str) -> Optional[AlbumInfo]:
+def album_for_mbid(release_id: str) -> AlbumInfo | None:
     """Get an AlbumInfo object for a MusicBrainz release ID. Return None
     if the ID is not found.
     """
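For context on the `namedtuple` to `NamedTuple` change above: the class form adds per-field type annotations while remaining a real tuple, so existing unpacking and indexing keep working. A self-contained sketch, with a simplified float standing in for the `Distance` field:

    from typing import NamedTuple


    class TrackMatch(NamedTuple):
        distance: float  # simplified stand-in for the Distance object
        info: dict


    match = TrackMatch(distance=0.1, info={"title": "Demo"})
    dist, info = match  # tuple unpacking still works
    assert match[0] == match.distance  # and so does indexing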
@@ -667,7 +611,7 @@ def album_for_mbid(release_id: str) -> Optional[AlbumInfo]:
     return None


-def track_for_mbid(recording_id: str) -> Optional[TrackInfo]:
+def track_for_mbid(recording_id: str) -> TrackInfo | None:
     """Get a TrackInfo object for a MusicBrainz recording ID. Return None
     if the ID is not found.
     """
@@ -713,12 +657,12 @@ def invoke_mb(call_func: Callable, *args):

 @plugins.notify_info_yielded("albuminfo_received")
 def album_candidates(
-    items: List[Item],
+    items: list[Item],
     artist: str,
     album: str,
     va_likely: bool,
-    extra_tags: Dict,
-) -> Iterable[Tuple]:
+    extra_tags: dict,
+) -> Iterable[tuple]:
     """Search for album matches. ``items`` is a list of Item objects
     that make up the album. ``artist`` and ``album`` are the respective
     names (strings), which may be derived from the item list or may be
@@ -746,7 +690,7 @@ def album_candidates(


 @plugins.notify_info_yielded("trackinfo_received")
-def item_candidates(item: Item, artist: str, title: str) -> Iterable[Tuple]:
+def item_candidates(item: Item, artist: str, title: str) -> Iterable[tuple]:
     """Search for item matches. ``item`` is the Item to be matched.
     ``artist`` and ``title`` are strings and either reflect the item or
     are specified by the user.
beets/autotag/match.py

@@ -16,24 +16,17 @@
 releases and tracks.
 """

 from __future__ import annotations

 import datetime
 import re
-from collections import namedtuple
-from typing import (
-    Any,
-    Dict,
-    Iterable,
-    List,
-    Optional,
-    Sequence,
-    Tuple,
-    TypeVar,
-    Union,
-    cast,
-)
+from collections.abc import Iterable, Sequence
+from enum import IntEnum
+from functools import cache
+from typing import TYPE_CHECKING, Any, NamedTuple, TypeVar, cast

-from munkres import Munkres
+import lap
+import numpy as np

 from beets import config, logging, plugins
 from beets.autotag import (
@@ -44,9 +37,10 @@ from beets.autotag import (
     TrackMatch,
     hooks,
 )
-from beets.library import Item
 from beets.util import plurality
-from beets.util.enumeration import OrderedEnum
+
+if TYPE_CHECKING:
+    from beets.library import Item

 # Artist signals that indicate "various artists". These are used at the
 # album level to determine whether a given release is likely a VA
@@ -61,7 +55,7 @@ log = logging.getLogger("beets")
 # Recommendation enumeration.


-class Recommendation(OrderedEnum):
+class Recommendation(IntEnum):
     """Indicates a qualitative suggestion to the user about what should
     be done with a given match.
     """
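Dropping the custom `OrderedEnum` works because `IntEnum` members already compare by their integer values, which is all the recommendation logic needs. A sketch with illustrative member names and values:

    from enum import IntEnum


    class Recommendation(IntEnum):
        none = 0
        low = 1
        medium = 2
        strong = 3


    # Ordered comparisons come for free with IntEnum.
    assert Recommendation.low < Recommendation.strong
    assert max(Recommendation.low, Recommendation.medium) is Recommendation.medium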
@@ -76,7 +70,10 @@ class Recommendation(OrderedEnum):
 # consists of a list of possible candidates (i.e., AlbumInfo or TrackInfo
 # objects) and a recommendation value.

-Proposal = namedtuple("Proposal", ("candidates", "recommendation"))
+
+class Proposal(NamedTuple):
+    candidates: Sequence[AlbumMatch | TrackMatch]
+    recommendation: Recommendation


 # Primary matching functionality.
@@ -84,7 +81,7 @@ Proposal = namedtuple("Proposal", ("candidates", "recommendation"))

 def current_metadata(
     items: Iterable[Item],
-) -> Tuple[Dict[str, Any], Dict[str, Any]]:
+) -> tuple[dict[str, Any], dict[str, Any]]:
     """Extract the likely current metadata for an album given a list of its
     items. Return two dictionaries:
      - The most common value for each field.
@@ -123,29 +120,29 @@ def current_metadata(
 def assign_items(
     items: Sequence[Item],
     tracks: Sequence[TrackInfo],
-) -> Tuple[Dict[Item, TrackInfo], List[Item], List[TrackInfo]]:
+) -> tuple[dict[Item, TrackInfo], list[Item], list[TrackInfo]]:
     """Given a list of Items and a list of TrackInfo objects, find the
     best mapping between them. Returns a mapping from Items to TrackInfo
     objects, a set of extra Items, and a set of extra TrackInfo
     objects. These "extra" objects occur when there is an unequal number
     of objects of the two types.
     """
-    # Construct the cost matrix.
-    costs: List[List[Distance]] = []
-    for item in items:
-        row = []
-        for track in tracks:
-            row.append(track_distance(item, track))
-        costs.append(row)
-
-    # Find a minimum-cost bipartite matching.
     log.debug("Computing track assignment...")
-    matching = Munkres().compute(costs)
+    # Construct the cost matrix.
+    costs = [[float(track_distance(i, t)) for t in tracks] for i in items]
+    # Assign items to tracks
+    _, _, assigned_item_idxs = lap.lapjv(np.array(costs), extend_cost=True)
     log.debug("...done.")

     # Produce the output matching.
-    mapping = {items[i]: tracks[j] for (i, j) in matching}
-    extra_items = list(set(items) - set(mapping.keys()))
+    # Each item in `assigned_item_idxs` list corresponds to a track in the
+    # `tracks` list. Each value is either an index into the assigned item in
+    # `items` list, or -1 if that track has no match.
+    mapping = {
+        items[iidx]: t
+        for iidx, t in zip(assigned_item_idxs, tracks)
+        if iidx != -1
+    }
+    extra_items = list(set(items) - mapping.keys())
     extra_items.sort(key=lambda i: (i.disc, i.track, i.title))
     extra_tracks = list(set(tracks) - set(mapping.values()))
     extra_tracks.sort(key=lambda t: (t.index, t.title))
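A toy run of the new `lap.lapjv` call, using a hand-made cost matrix with more tracks than items. Per the comment in the hunk above, the third return value maps each track to an item index, with -1 for tracks left unmatched:

    import lap
    import numpy as np

    # Two items (rows) scored against three tracks (columns).
    costs = np.array([
        [0.1, 0.9, 0.8],
        [0.7, 0.2, 0.9],
    ])
    # extend_cost=True lets lapjv handle the rectangular matrix by
    # padding it internally.
    total, item_to_track, track_to_item = lap.lapjv(costs, extend_cost=True)
    print(track_to_item)  # e.g. [0, 1, -1]: the third track has no item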
@@ -159,6 +156,18 @@ def track_index_changed(item: Item, track_info: TrackInfo) -> bool:
     return item.track not in (track_info.medium_index, track_info.index)


+@cache
+def get_track_length_grace() -> float:
+    """Get cached grace period for track length matching."""
+    return config["match"]["track_length_grace"].as_number()
+
+
+@cache
+def get_track_length_max() -> float:
+    """Get cached maximum track length for track length matching."""
+    return config["match"]["track_length_max"].as_number()
+
+
 def track_distance(
     item: Item,
     track_info: TrackInfo,
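The pattern above is plain `functools.cache` (new in Python 3.9, matching the project's new floor) wrapped around a zero-argument getter, so the config tree is only consulted once per process. A stripped-down equivalent with a stand-in for the expensive lookup:

    from functools import cache


    @cache
    def get_grace() -> float:
        print("expensive config lookup")  # runs only once
        return 10.0


    get_grace()  # prints, then caches the result
    get_grace()  # served from the cache, no print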
@@ -167,23 +176,17 @@ def track_distance(
     """Determines the significance of a track metadata change. Returns a
     Distance object. `incl_artist` indicates that a distance component should
     be included for the track artist (i.e., for various-artist releases).
+
+    ``track_length_grace`` and ``track_length_max`` configuration options are
+    cached because this function is called many times during the matching
+    process and their access comes with a performance overhead.
     """
     dist = hooks.Distance()

     # Length.
-    if track_info.length:
-        item_length = cast(float, item.length)
-        track_length_grace = cast(
-            Union[float, int],
-            config["match"]["track_length_grace"].as_number(),
-        )
-        track_length_max = cast(
-            Union[float, int],
-            config["match"]["track_length_max"].as_number(),
-        )
-
-        diff = abs(item_length - track_info.length) - track_length_grace
-        dist.add_ratio("track_length", diff, track_length_max)
+    if info_length := track_info.length:
+        diff = abs(item.length - info_length) - get_track_length_grace()
+        dist.add_ratio("track_length", diff, get_track_length_max())

     # Title.
     dist.add_string("track_title", item.title, track_info.title)
@@ -204,6 +207,10 @@ def track_distance(
     if item.mb_trackid:
         dist.add_expr("track_id", item.mb_trackid != track_info.track_id)

+    # Penalize mismatching disc numbers.
+    if track_info.medium and item.disc:
+        dist.add_expr("medium", item.disc != track_info.medium)
+
     # Plugins.
     dist.update(plugins.track_distance(item, track_info))
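Back-of-envelope arithmetic for the new penalty, under the simplifying assumption (based on the `Distance` internals shown in the hooks.py hunks) that the raw distance is the weight-scaled sum of penalties normalized by the maximum possible sum. With the `medium: 1.0` weight added to config_default.yaml below, a disc mismatch contributes its full weight:

    weights = {"track_length": 2.0, "medium": 1.0}
    penalties = {"track_length": 0.2, "medium": 1.0}  # 1.0 = discs differ

    raw = sum(penalties[key] * weights[key] for key in penalties)
    max_raw = sum(weights.values())
    print(round(raw / max_raw, 2))  # -> 0.47 normalized distance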
@@ -213,7 +220,7 @@ def track_distance(
 def distance(
     items: Sequence[Item],
     album_info: AlbumInfo,
-    mapping: Dict[Item, TrackInfo],
+    mapping: dict[Item, TrackInfo],
 ) -> Distance:
     """Determines how "significant" an album metadata change would be.
     Returns a Distance object. `album_info` is an AlbumInfo object
@@ -351,7 +358,7 @@ def match_by_id(items: Iterable[Item]):


 def _recommendation(
-    results: Sequence[Union[AlbumMatch, TrackMatch]],
+    results: Sequence[AlbumMatch | TrackMatch],
 ) -> Recommendation:
     """Given a sorted list of AlbumMatch or TrackMatch objects, return a
     recommendation based on the results' distances.
@@ -417,7 +424,7 @@ def _sort_candidates(candidates: Iterable[AnyMatch]) -> Sequence[AnyMatch]:

 def _add_candidate(
     items: Sequence[Item],
-    results: Dict[Any, AlbumMatch],
+    results: dict[Any, AlbumMatch],
     info: AlbumInfo,
 ):
     """Given a candidate AlbumInfo object, attempt to add the candidate
@@ -469,10 +476,10 @@ def _add_candidate(

 def tag_album(
     items,
-    search_artist: Optional[str] = None,
-    search_album: Optional[str] = None,
-    search_ids: List[str] = [],
-) -> Tuple[str, str, Proposal]:
+    search_artist: str | None = None,
+    search_album: str | None = None,
+    search_ids: list[str] = [],
+) -> tuple[str, str, Proposal]:
     """Return a tuple of the current artist name, the current album
     name, and a `Proposal` containing `AlbumMatch` candidates.
@@ -497,7 +504,7 @@ def tag_album(
     log.debug("Tagging {0} - {1}", cur_artist, cur_album)

     # The output result, keys are the MB album ID.
-    candidates: Dict[Any, AlbumMatch] = {}
+    candidates: dict[Any, AlbumMatch] = {}

     # Search by explicit ID.
     if search_ids:
@@ -561,9 +568,9 @@ def tag_album(

 def tag_item(
     item,
-    search_artist: Optional[str] = None,
-    search_title: Optional[str] = None,
-    search_ids: Optional[List[str]] = None,
+    search_artist: str | None = None,
+    search_title: str | None = None,
+    search_ids: list[str] | None = None,
 ) -> Proposal:
     """Find metadata for a single track. Return a `Proposal` consisting
     of `TrackMatch` objects.
@@ -576,7 +583,7 @@ def tag_item(
     # Holds candidates found so far: keys are MBIDs; values are
     # (distance, TrackInfo) pairs.
     candidates = {}
-    rec: Optional[Recommendation] = None
+    rec: Recommendation | None = None

     # First, try matching by MusicBrainz ID.
     trackids = search_ids or [t for t in [item.mb_trackid] if t]
beets/autotag/mb.py

@@ -12,14 +12,16 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-"""Searches for albums in the MusicBrainz database.
-"""
+"""Searches for albums in the MusicBrainz database."""
+
 from __future__ import annotations

 import re
 import traceback
 from collections import Counter
-from typing import Any, Dict, Iterator, List, Optional, Sequence, Tuple, cast
+from collections.abc import Iterator, Sequence
+from itertools import product
+from typing import Any, cast
 from urllib.parse import urljoin

 import musicbrainzngs
@@ -53,7 +55,7 @@ FIELDS_TO_MB_KEYS = {
 musicbrainzngs.set_useragent("beets", beets.__version__, "https://beets.io/")


-class MusicBrainzAPIError(util.HumanReadableException):
+class MusicBrainzAPIError(util.HumanReadableError):
     """An error while talking to MusicBrainz. The `query` field is the
     parameter to the action and may have any type.
     """
@@ -130,7 +132,7 @@ def configure():
     )


-def _preferred_alias(aliases: List):
+def _preferred_alias(aliases: list):
     """Given an list of alias structures for an artist credit, select
     and return the user's preferred alias alias or None if no matching
     alias is found.
@@ -165,7 +167,7 @@ def _preferred_alias(aliases: list):
     return matches[0]


-def _preferred_release_event(release: Dict[str, Any]) -> Tuple[str, str]:
+def _preferred_release_event(release: dict[str, Any]) -> tuple[str, str]:
     """Given a release, select and return the user's preferred release
     event as a tuple of (country, release_date). Fall back to the
     default release event if a preferred event is not found.
@@ -185,8 +187,8 @@ def _preferred_release_event(release: dict[str, Any]) -> tuple[str, str]:


 def _multi_artist_credit(
-    credit: List[Dict], include_join_phrase: bool
-) -> Tuple[List[str], List[str], List[str]]:
+    credit: list[dict], include_join_phrase: bool
+) -> tuple[list[str], list[str], list[str]]:
     """Given a list representing an ``artist-credit`` block, accumulate
     data into a triple of joined artist name lists: canonical, sort, and
     credit.
@@ -233,7 +235,7 @@ def _multi_artist_credit(
     )


-def _flatten_artist_credit(credit: List[Dict]) -> Tuple[str, str, str]:
+def _flatten_artist_credit(credit: list[dict]) -> tuple[str, str, str]:
     """Given a list representing an ``artist-credit`` block, flatten the
     data into a triple of joined artist name strings: canonical, sort, and
     credit.
@@ -248,12 +250,12 @@ def _flatten_artist_credit(credit: list[dict]) -> tuple[str, str, str]:
     )


-def _artist_ids(credit: List[Dict]) -> List[str]:
+def _artist_ids(credit: list[dict]) -> list[str]:
     """
     Given a list representing an ``artist-credit``,
     return a list of artist IDs
     """
-    artist_ids: List[str] = []
+    artist_ids: list[str] = []
     for el in credit:
         if isinstance(el, dict):
             artist_ids.append(el["artist"]["id"])
@@ -275,11 +277,11 @@ def _get_related_artist_names(relations, relation_type):


 def track_info(
-    recording: Dict,
-    index: Optional[int] = None,
-    medium: Optional[int] = None,
-    medium_index: Optional[int] = None,
-    medium_total: Optional[int] = None,
+    recording: dict,
+    index: int | None = None,
+    medium: int | None = None,
+    medium_index: int | None = None,
+    medium_total: int | None = None,
 ) -> beets.autotag.hooks.TrackInfo:
     """Translates a MusicBrainz recording result dictionary into a beets
     ``TrackInfo`` object. Three parameters are optional and are used
@@ -372,7 +374,6 @@ def track_info(
     for extra_trackdata in extra_trackdatas:
         info.update(extra_trackdata)

-    info.decode()
     return info
@@ -400,7 +401,7 @@ def _set_date_str(
     setattr(info, key, date_num)


-def album_info(release: Dict) -> beets.autotag.hooks.AlbumInfo:
+def album_info(release: dict) -> beets.autotag.hooks.AlbumInfo:
     """Takes a MusicBrainz release result dictionary and returns a beets
     AlbumInfo object containing the interesting data about that release.
     """
@ -619,79 +620,50 @@ def album_info(release: Dict) -> beets.autotag.hooks.AlbumInfo:
|
|||
)
|
||||
|
||||
# We might find links to external sources (Discogs, Bandcamp, ...)
|
||||
if any(
|
||||
config["musicbrainz"]["external_ids"].get().values()
|
||||
) and release.get("url-relation-list"):
|
||||
discogs_url, bandcamp_url, spotify_url = None, None, None
|
||||
deezer_url, beatport_url, tidal_url = None, None, None
|
||||
fetch_discogs, fetch_bandcamp, fetch_spotify = False, False, False
|
||||
fetch_deezer, fetch_beatport, fetch_tidal = False, False, False
|
||||
external_ids = config["musicbrainz"]["external_ids"].get()
|
||||
wanted_sources = {site for site, wanted in external_ids.items() if wanted}
|
||||
if wanted_sources and (url_rels := release.get("url-relation-list")):
|
||||
urls = {}
|
||||
|
||||
if config["musicbrainz"]["external_ids"]["discogs"].get():
|
||||
fetch_discogs = True
|
||||
if config["musicbrainz"]["external_ids"]["bandcamp"].get():
|
||||
fetch_bandcamp = True
|
||||
if config["musicbrainz"]["external_ids"]["spotify"].get():
|
||||
fetch_spotify = True
|
||||
if config["musicbrainz"]["external_ids"]["deezer"].get():
|
||||
fetch_deezer = True
|
||||
if config["musicbrainz"]["external_ids"]["beatport"].get():
|
||||
fetch_beatport = True
|
||||
if config["musicbrainz"]["external_ids"]["tidal"].get():
|
||||
fetch_tidal = True
|
||||
for source, url in product(wanted_sources, url_rels):
|
||||
if f"{source}.com" in (target := url["target"]):
|
||||
urls[source] = target
|
||||
log.debug(
|
||||
"Found link to {} release via MusicBrainz",
|
||||
source.capitalize(),
|
||||
)
|
||||
|
||||
for url in release["url-relation-list"]:
|
||||
if fetch_discogs and url["type"] == "discogs":
|
||||
log.debug("Found link to Discogs release via MusicBrainz")
|
||||
discogs_url = url["target"]
|
||||
if fetch_bandcamp and "bandcamp.com" in url["target"]:
|
||||
log.debug("Found link to Bandcamp release via MusicBrainz")
|
||||
bandcamp_url = url["target"]
|
||||
if fetch_spotify and "spotify.com" in url["target"]:
|
||||
log.debug("Found link to Spotify album via MusicBrainz")
|
||||
spotify_url = url["target"]
|
||||
if fetch_deezer and "deezer.com" in url["target"]:
|
||||
log.debug("Found link to Deezer album via MusicBrainz")
|
||||
deezer_url = url["target"]
|
||||
if fetch_beatport and "beatport.com" in url["target"]:
|
||||
log.debug("Found link to Beatport release via MusicBrainz")
|
||||
beatport_url = url["target"]
|
||||
if fetch_tidal and "tidal.com" in url["target"]:
|
||||
log.debug("Found link to Tidal release via MusicBrainz")
|
||||
tidal_url = url["target"]
|
||||
|
||||
if discogs_url:
|
||||
info.discogs_albumid = extract_discogs_id_regex(discogs_url)
|
||||
if bandcamp_url:
|
||||
info.bandcamp_album_id = bandcamp_url
|
||||
if spotify_url:
|
||||
if "discogs" in urls:
|
||||
info.discogs_albumid = extract_discogs_id_regex(urls["discogs"])
|
||||
if "bandcamp" in urls:
|
||||
info.bandcamp_album_id = urls["bandcamp"]
|
||||
if "spotify" in urls:
|
||||
info.spotify_album_id = MetadataSourcePlugin._get_id(
|
||||
"album", spotify_url, spotify_id_regex
|
||||
"album", urls["spotify"], spotify_id_regex
|
||||
)
|
||||
if deezer_url:
|
||||
if "deezer" in urls:
|
||||
info.deezer_album_id = MetadataSourcePlugin._get_id(
|
||||
"album", deezer_url, deezer_id_regex
|
||||
"album", urls["deezer"], deezer_id_regex
|
||||
)
|
||||
if beatport_url:
|
||||
if "beatport" in urls:
|
||||
info.beatport_album_id = MetadataSourcePlugin._get_id(
|
||||
"album", beatport_url, beatport_id_regex
|
||||
"album", urls["beatport"], beatport_id_regex
|
||||
)
|
||||
if tidal_url:
|
||||
info.tidal_album_id = tidal_url.split("/")[-1]
|
||||
if "tidal" in urls:
|
||||
info.tidal_album_id = urls["tidal"].split("/")[-1]
|
||||
|
||||
extra_albumdatas = plugins.send("mb_album_extract", data=release)
|
||||
for extra_albumdata in extra_albumdatas:
|
||||
info.update(extra_albumdata)
|
||||
|
||||
info.decode()
|
||||
return info
|
||||
|
||||
|
||||
def match_album(
|
||||
artist: str,
|
||||
album: str,
|
||||
tracks: Optional[int] = None,
|
||||
extra_tags: Optional[Dict[str, Any]] = None,
|
||||
tracks: int | None = None,
|
||||
extra_tags: dict[str, Any] | None = None,
|
||||
) -> Iterator[beets.autotag.hooks.AlbumInfo]:
|
||||
"""Searches for a single album ("release" in MusicBrainz parlance)
|
||||
and returns an iterator over AlbumInfo objects. May raise a
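
The rewritten branch above collapses six near-identical per-site blocks into one pass over the Cartesian product of enabled sources and URL relations. A minimal, self-contained sketch of the pattern, with made-up source names and relation entries standing in for real MusicBrainz payloads:

    from itertools import product

    # Hypothetical enabled sources and "url-relation-list" entries.
    wanted_sources = {"discogs", "bandcamp"}
    url_rels = [
        {"target": "https://www.discogs.com/release/123"},
        {"target": "https://demo.bandcamp.com/album/demo"},
    ]

    urls = {}
    for source, url in product(wanted_sources, url_rels):
        # The walrus expression names url["target"] once and reuses it.
        if f"{source}.com" in (target := url["target"]):
            urls[source] = target

    print(urls["discogs"])  # https://www.discogs.com/release/123
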
@@ -768,7 +740,7 @@ def match_track(
         yield track_info(recording)


-def _parse_id(s: str) -> Optional[str]:
+def _parse_id(s: str) -> str | None:
     """Search for a MusicBrainz ID in the given string and return it. If
     no ID can be found, return None.
     """
@@ -785,8 +757,8 @@ def _is_translation(r):


 def _find_actual_release_from_pseudo_release(
-    pseudo_rel: Dict,
-) -> Optional[Dict]:
+    pseudo_rel: dict,
+) -> dict | None:
     try:
         relations = pseudo_rel["release"]["release-relation-list"]
     except KeyError:
@@ -805,7 +777,7 @@ def _find_actual_release_from_pseudo_release(

 def _merge_pseudo_and_actual_album(
     pseudo: beets.autotag.hooks.AlbumInfo, actual: beets.autotag.hooks.AlbumInfo
-) -> Optional[beets.autotag.hooks.AlbumInfo]:
+) -> beets.autotag.hooks.AlbumInfo | None:
     """
     Merges a pseudo release with its actual release.

@@ -843,7 +815,7 @@ def _merge_pseudo_and_actual_album(
     return merged


-def album_for_id(releaseid: str) -> Optional[beets.autotag.hooks.AlbumInfo]:
+def album_for_id(releaseid: str) -> beets.autotag.hooks.AlbumInfo | None:
     """Fetches an album by its MusicBrainz ID and returns an AlbumInfo
     object or None if the album is not found. May raise a
     MusicBrainzAPIError.
@@ -881,7 +853,7 @@ def album_for_id(releaseid: str) -> Optional[beets.autotag.hooks.AlbumInfo]:
     return release


-def track_for_id(releaseid: str) -> Optional[beets.autotag.hooks.TrackInfo]:
+def track_for_id(releaseid: str) -> beets.autotag.hooks.TrackInfo | None:
     """Fetches a track by its MusicBrainz ID. Returns a TrackInfo object
     or None if no track is found. May raise a MusicBrainzAPIError.
     """

@@ -207,6 +207,7 @@ match:
         track_index: 1.0
         track_length: 2.0
         track_id: 5.0
+        medium: 1.0
     preferred:
         countries: []
         media: []

@@ -32,4 +32,18 @@ from .queryparse import (
 )
 from .types import Type

-# flake8: noqa
+__all__ = [
+    "AndQuery",
+    "Database",
+    "FieldQuery",
+    "InvalidQueryError",
+    "MatchQuery",
+    "Model",
+    "OrQuery",
+    "Query",
+    "Results",
+    "Type",
+    "parse_sorted_query",
+    "query_from_strings",
+    "sort_from_strings",
+]
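
The new `__all__` makes the package's public names explicit: star-imports bind only the listed names, and the re-exports no longer need a blanket noqa. A small runnable sketch of the effect, using a throwaway module rather than beets itself:

    import types

    # A throwaway module with one public and one private name.
    mod = types.ModuleType("demo")
    exec("__all__ = ['Query']\nclass Query: ...\nclass _Hidden: ...", mod.__dict__)

    # `from demo import *` would bind exactly the names in __all__:
    print(mod.__all__)  # ['Query']
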
@@ -24,28 +24,9 @@ import threading
 import time
 from abc import ABC
 from collections import defaultdict
+from collections.abc import Generator, Iterable, Iterator, Mapping, Sequence
-from sqlite3 import Connection
-from types import TracebackType
-from typing import (
-    Any,
-    AnyStr,
-    Callable,
-    DefaultDict,
-    Dict,
-    Generator,
-    Generic,
-    Iterable,
-    Iterator,
-    List,
-    Mapping,
-    Optional,
-    Sequence,
-    Tuple,
-    Type,
-    TypeVar,
-    Union,
-    cast,
-)
+from typing import TYPE_CHECKING, Any, AnyStr, Callable, Generic, TypeVar, cast

 from unidecode import unidecode

@@ -56,6 +37,8 @@ from . import types
 from .query import (
     AndQuery,
     FieldQuery,
+    FieldQueryType,
+    FieldSort,
     MatchQuery,
     NullSort,
     Query,
@@ -63,6 +46,18 @@ from .query import (
     TrueQuery,
 )

+if TYPE_CHECKING:
+    from types import TracebackType
+
+    from .query import SQLiteType
+
+    D = TypeVar("D", bound="Database", default=Any)
+else:
+    D = TypeVar("D", bound="Database")
+
+
+FlexAttrs = dict[str, str]


 class DBAccessError(Exception):
     """The SQLite database became inaccessible.
@@ -120,7 +115,7 @@ class FormattedMapping(Mapping[str, str]):
     def get(  # type: ignore
         self,
         key: str,
-        default: Optional[str] = None,
+        default: str | None = None,
     ) -> str:
         """Similar to Mapping.get(key, default), but always formats to str."""
         if default is None:
@@ -158,14 +153,14 @@ class FormattedMapping(Mapping[str, str]):
 class LazyConvertDict:
     """Lazily convert types for attributes fetched from the database"""

-    def __init__(self, model_cls: "Model"):
+    def __init__(self, model_cls: Model):
         """Initialize the object empty"""
         # FIXME: Dict[str, SQLiteType]
-        self._data: Dict[str, Any] = {}
+        self._data: dict[str, Any] = {}
         self.model_cls = model_cls
-        self._converted: Dict[str, Any] = {}
+        self._converted: dict[str, Any] = {}

-    def init(self, data: Dict[str, Any]):
+    def init(self, data: dict[str, Any]):
         """Set the base data that should be lazily converted"""
         self._data = data

@@ -195,7 +190,7 @@ class LazyConvertDict:
         if key in self._data:
             del self._data[key]

-    def keys(self) -> List[str]:
+    def keys(self) -> list[str]:
         """Get a list of available field names for this object."""
         return list(self._converted.keys()) + list(self._data.keys())

@@ -213,14 +208,14 @@ class LazyConvertDict:
         for key, value in values.items():
             self[key] = value

-    def items(self) -> Iterable[Tuple[str, Any]]:
+    def items(self) -> Iterable[tuple[str, Any]]:
         """Iterate over (key, value) pairs that this object contains.
         Computed fields are not included.
         """
         for key in self:
             yield key, self[key]

-    def get(self, key: str, default: Optional[Any] = None):
+    def get(self, key: str, default: Any | None = None):
         """Get the value for a given key or `default` if it does not
         exist.
         """
@@ -252,7 +247,7 @@ class LazyConvertDict:
 # Abstract base for model classes.


-class Model(ABC):
+class Model(ABC, Generic[D]):
     """An abstract object representing an object in the database. Model
     objects act like dictionaries (i.e., they allow subscript access like
     ``obj['field']``). The same field set is available via attribute
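
Parametrizing `Model` over its database type lets subclasses pin the associated database to a concrete class, so methods that return `_db` can be typed more precisely than `Database`. A stripped-down sketch of the idea; the names below are illustrative, not the real beets classes:

    from __future__ import annotations

    from typing import Generic, TypeVar

    class Database: ...

    class Library(Database): ...

    D = TypeVar("D", bound=Database)

    class Model(Generic[D]):
        def __init__(self, db: D | None = None) -> None:
            self._db = db

    class LibModel(Model[Library]):
        """A model bound specifically to a Library-like database."""

    m = LibModel(Library())  # checkers now see m._db as Library | None
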
@@ -286,7 +281,7 @@ class Model(ABC):
     """The flex field SQLite table name.
     """

-    _fields: Dict[str, types.Type] = {}
+    _fields: dict[str, types.Type] = {}
     """A mapping indicating available "fixed" fields on this type. The
     keys are field names and the values are `Type` objects.
     """
@@ -296,16 +291,16 @@ class Model(ABC):
     terms.
     """

-    _types: Dict[str, types.Type] = {}
+    _types: dict[str, types.Type] = {}
     """Optional Types for non-fixed (i.e., flexible and computed) fields.
     """

-    _sorts: Dict[str, Type[Sort]] = {}
+    _sorts: dict[str, type[FieldSort]] = {}
     """Optional named sort criteria. The keys are strings and the values
     are subclasses of `Sort`.
     """

-    _queries: Dict[str, Type[FieldQuery]] = {}
+    _queries: dict[str, FieldQueryType] = {}
     """Named queries that use a field-like `name:value` syntax but which
     do not relate to any specific field.
     """
@@ -322,7 +317,7 @@ class Model(ABC):
     """

     @cached_classproperty
-    def _relation(cls) -> type[Model]:
+    def _relation(cls):
         """The model that this model is closely related to."""
         return cls

@@ -348,7 +343,7 @@ class Model(ABC):
         return cls._relation._fields.keys() - cls.shared_db_fields

     @classmethod
-    def _getters(cls: Type["Model"]):
+    def _getters(cls: type[Model]):
         """Return a mapping from field names to getter functions."""
         # We could cache this if it becomes a performance problem to
         # gather the getter mapping every time.
@@ -363,7 +358,7 @@ class Model(ABC):

     # Basic operation.

-    def __init__(self, db: Optional[Database] = None, **values):
+    def __init__(self, db: D | None = None, **values):
         """Create a new object with an optional Database association and
         initial field values.
         """
@@ -378,10 +373,10 @@ class Model(ABC):

     @classmethod
     def _awaken(
-        cls: Type[AnyModel],
-        db: Optional[Database] = None,
-        fixed_values: Dict[str, Any] = {},
-        flex_values: Dict[str, Any] = {},
+        cls: type[AnyModel],
+        db: D | None = None,
+        fixed_values: dict[str, Any] = {},
+        flex_values: dict[str, Any] = {},
     ) -> AnyModel:
         """Create an object with values drawn from the database.

@@ -409,7 +404,7 @@ class Model(ABC):
         if self._db:
             self._revision = self._db.revision

-    def _check_db(self, need_id: bool = True) -> Database:
+    def _check_db(self, need_id: bool = True) -> D:
         """Ensure that this object is associated with a database row: it
         has a reference to a database (`_db`) and an id. A ValueError
         exception is raised otherwise.
@@ -421,7 +416,7 @@ class Model(ABC):

         return self._db

-    def copy(self) -> "Model":
+    def copy(self) -> Model:
         """Create a copy of the model object.

         The field values and other state is duplicated, but the new copy
@@ -537,7 +532,7 @@ class Model(ABC):
         for key, value in values.items():
             self[key] = value

-    def items(self) -> Iterator[Tuple[str, Any]]:
+    def items(self) -> Iterator[tuple[str, Any]]:
         """Iterate over (key, value) pairs that this object contains.
         Computed fields are not included.
         """
@@ -579,7 +574,7 @@ class Model(ABC):

     # Database interaction (CRUD methods).

-    def store(self, fields: Optional[Iterable[str]] = None):
+    def store(self, fields: Iterable[str] | None = None):
         """Save the object's metadata into the library database.
         :param fields: the fields to be stored. If not specified, all fields
         will be.
@@ -590,7 +585,7 @@ class Model(ABC):

         # Build assignments for query.
         assignments = []
-        subvars = []
+        subvars: list[SQLiteType] = []
         for key in fields:
             if key != "id" and key in self._dirty:
                 self._dirty.remove(key)
@@ -653,7 +648,7 @@ class Model(ABC):
             f"DELETE FROM {self._flex_table} WHERE entity_id=?", (self.id,)
         )

-    def add(self, db: Optional["Database"] = None):
+    def add(self, db: D | None = None):
         """Add the object to the library database. This object must be
         associated with a database; you can provide one via the `db`
         parameter or use the currently associated database.
@@ -692,7 +687,7 @@ class Model(ABC):

     def evaluate_template(
         self,
-        template: Union[str, functemplate.Template],
+        template: str | functemplate.Template,
         for_path: bool = False,
     ) -> str:
         """Evaluate a template (a string or a `Template` object) using
@@ -730,16 +725,16 @@ class Model(ABC):
         cls,
         field,
         pattern,
-        query_cls: Type[FieldQuery] = MatchQuery,
+        query_cls: FieldQueryType = MatchQuery,
     ) -> FieldQuery:
         """Get a `FieldQuery` for this model."""
         return query_cls(field, pattern, field in cls._fields)

     @classmethod
     def all_fields_query(
-        cls: Type["Model"],
-        pats: Mapping,
-        query_cls: Type[FieldQuery] = MatchQuery,
+        cls: type[Model],
+        pats: Mapping[str, str],
+        query_cls: FieldQueryType = MatchQuery,
     ):
         """Get a query that matches many fields with different patterns.

@@ -764,11 +759,11 @@ class Results(Generic[AnyModel]):

     def __init__(
         self,
-        model_class: Type[AnyModel],
-        rows: List[Mapping],
-        db: "Database",
+        model_class: type[AnyModel],
+        rows: list[sqlite3.Row],
+        db: D,
         flex_rows,
-        query: Optional[Query] = None,
+        query: Query | None = None,
         sort=None,
     ):
         """Create a result set that will construct objects of type
@@ -800,7 +795,7 @@ class Results(Generic[AnyModel]):

         # The materialized objects corresponding to rows that have been
         # consumed.
-        self._objects: List[AnyModel] = []
+        self._objects: list[AnyModel] = []

     def _get_objects(self) -> Iterator[AnyModel]:
         """Construct and generate Model objects for the query. The
@@ -850,9 +845,9 @@ class Results(Generic[AnyModel]):
         # Objects are pre-sorted (i.e., by the database).
         return self._get_objects()

-    def _get_indexed_flex_attrs(self) -> Mapping:
+    def _get_indexed_flex_attrs(self) -> dict[int, FlexAttrs]:
         """Index flexible attributes by the entity id they belong to"""
-        flex_values: Dict[int, Dict[str, Any]] = {}
+        flex_values: dict[int, FlexAttrs] = {}
         for row in self.flex_rows:
             if row["entity_id"] not in flex_values:
                 flex_values[row["entity_id"]] = {}
@@ -861,7 +856,9 @@ class Results(Generic[AnyModel]):

         return flex_values

-    def _make_model(self, row, flex_values: Dict = {}) -> AnyModel:
+    def _make_model(
+        self, row: sqlite3.Row, flex_values: FlexAttrs = {}
+    ) -> AnyModel:
         """Create a Model object for the given row"""
         cols = dict(row)
         values = {k: v for (k, v) in cols.items() if not k[:4] == "flex"}
@@ -912,7 +909,7 @@ class Results(Generic[AnyModel]):
         except StopIteration:
             raise IndexError(f"result index {n} out of range")

-    def get(self) -> Optional[AnyModel]:
+    def get(self) -> AnyModel | None:
         """Return the first matching object, or None if no objects
         match.
         """
@@ -933,10 +930,10 @@ class Transaction:
     current transaction.
     """

-    def __init__(self, db: "Database"):
+    def __init__(self, db: Database):
         self.db = db

-    def __enter__(self) -> "Transaction":
+    def __enter__(self) -> Transaction:
         """Begin a transaction. This transaction may be created while
         another is active in a different thread.
         """
@@ -951,7 +948,7 @@ class Transaction:

     def __exit__(
         self,
-        exc_type: Type[Exception],
+        exc_type: type[Exception],
         exc_value: Exception,
         traceback: TracebackType,
     ):
@@ -970,14 +967,16 @@ class Transaction:
         self._mutated = False
         self.db._db_lock.release()

-    def query(self, statement: str, subvals: Sequence = ()) -> List:
+    def query(
+        self, statement: str, subvals: Sequence[SQLiteType] = ()
+    ) -> list[sqlite3.Row]:
         """Execute an SQL statement with substitution values and return
         a list of rows from the database.
         """
         cursor = self.db._connection().execute(statement, subvals)
         return cursor.fetchall()

-    def mutate(self, statement: str, subvals: Sequence = ()) -> Any:
+    def mutate(self, statement: str, subvals: Sequence[SQLiteType] = ()) -> Any:
         """Execute an SQL statement with substitution values and return
         the row ID of the last affected row.
         """
@@ -1010,7 +1009,7 @@ class Database:
     the backend.
     """

-    _models: Sequence[Type[Model]] = ()
+    _models: Sequence[type[Model]] = ()
     """The Model subclasses representing tables in this database.
     """

@@ -1031,9 +1030,9 @@ class Database:
         self.path = path
         self.timeout = timeout

-        self._connections: Dict[int, sqlite3.Connection] = {}
-        self._tx_stacks: DefaultDict[int, List[Transaction]] = defaultdict(list)
-        self._extensions: List[str] = []
+        self._connections: dict[int, sqlite3.Connection] = {}
+        self._tx_stacks: defaultdict[int, list[Transaction]] = defaultdict(list)
+        self._extensions: list[str] = []

         # A lock to protect the _connections and _tx_stacks maps, which
         # both map thread IDs to private resources.
@@ -1110,7 +1109,7 @@ class Database:
             value = value.decode()
             return re.search(pattern, str(value)) is not None

-        def bytelower(bytestring: Optional[AnyStr]) -> Optional[AnyStr]:
+        def bytelower(bytestring: AnyStr | None) -> AnyStr | None:
            """A custom ``bytelower`` sqlite function so we can compare
            bytestrings in a semi case insensitive fashion.

@@ -1138,7 +1137,7 @@ class Database:
             conn.close()

     @contextlib.contextmanager
-    def _tx_stack(self) -> Generator[List, None, None]:
+    def _tx_stack(self) -> Generator[list[Transaction]]:
         """A context manager providing access to the current thread's
         transaction stack. The context manager synchronizes access to
         the stack map. Transactions should never migrate across threads.
@@ -1224,18 +1223,16 @@ class Database:
                     UNIQUE(entity_id, key) ON CONFLICT REPLACE);
                 CREATE INDEX IF NOT EXISTS {0}_by_entity
                     ON {0} (entity_id);
-                """.format(
-                    flex_table
-                )
+                """.format(flex_table)
             )

     # Querying.

     def _fetch(
         self,
-        model_cls: Type[AnyModel],
-        query: Optional[Query] = None,
-        sort: Optional[Sort] = None,
+        model_cls: type[AnyModel],
+        query: Query | None = None,
+        sort: Sort | None = None,
     ) -> Results[AnyModel]:
         """Fetch the objects of type `model_cls` matching the given
         query. The query may be given as a string, string sequence, a
@@ -1291,9 +1288,9 @@ class Database:

     def _get(
         self,
-        model_cls: Type[AnyModel],
+        model_cls: type[AnyModel],
         id,
-    ) -> Optional[AnyModel]:
+    ) -> AnyModel | None:
         """Get a Model object by its id or None if the id does not
         exist.
         """

@@ -19,31 +19,22 @@ from __future__ import annotations

 import re
 import unicodedata
 from abc import ABC, abstractmethod
+from collections.abc import Iterator, MutableSequence, Sequence
 from datetime import datetime, timedelta
 from functools import reduce
 from operator import mul, or_
-from typing import (
-    TYPE_CHECKING,
-    Any,
-    Collection,
-    Generic,
-    Iterator,
-    List,
-    MutableSequence,
-    Optional,
-    Pattern,
-    Sequence,
-    Set,
-    Tuple,
-    Type,
-    TypeVar,
-    Union,
-)
+from re import Pattern
+from typing import TYPE_CHECKING, Any, Generic, TypeVar, Union

 from beets import util

 if TYPE_CHECKING:
     from beets.dbcore import Model
+    from beets.dbcore.db import AnyModel
+
+    P = TypeVar("P", default=Any)
+else:
+    P = TypeVar("P")


 class ParsingError(ValueError):
@@ -83,11 +74,11 @@ class Query(ABC):
     """An abstract class representing a query into the database."""

     @property
-    def field_names(self) -> Set[str]:
+    def field_names(self) -> set[str]:
         """Return a set with field names that this query operates on."""
         return set()

-    def clause(self) -> Tuple[Optional[str], Sequence[Any]]:
+    def clause(self) -> tuple[str | None, Sequence[Any]]:
         """Generate an SQLite expression implementing the query.

         Return (clause, subvals) where clause is a valid sqlite
@@ -121,9 +112,9 @@ class Query(ABC):
         return hash(type(self))


-P = TypeVar("P")
-SQLiteType = Union[str, bytes, float, int, memoryview]
+SQLiteType = Union[str, bytes, float, int, memoryview, None]
 AnySQLiteType = TypeVar("AnySQLiteType", bound=SQLiteType)
+FieldQueryType = type["FieldQuery"]


 class FieldQuery(Query, Generic[P]):
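
Widening `SQLiteType` to include `None` matches what the sqlite3 driver actually accepts as a bound parameter, where `None` maps to SQL NULL. A quick demonstration with a throwaway table:

    import sqlite3
    from typing import Union

    SQLiteType = Union[str, bytes, float, int, memoryview, None]

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE t (v)")
    values: list[SQLiteType] = ["text", b"blob", 1.5, 2, None]
    for v in values:
        # Every member of the alias is a legal bound parameter;
        # None is stored as SQL NULL.
        conn.execute("INSERT INTO t (v) VALUES (?)", (v,))
    print(conn.execute("SELECT COUNT(*) FROM t WHERE v IS NULL").fetchone())  # (1,)
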
@@ -141,7 +132,7 @@ class FieldQuery(Query, Generic[P]):
         )

     @property
-    def field_names(self) -> Set[str]:
+    def field_names(self) -> set[str]:
         """Return a set with field names that this query operates on."""
         return {self.field_name}

@@ -150,10 +141,10 @@ class FieldQuery(Query, Generic[P]):
         self.pattern = pattern
         self.fast = fast

-    def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
+    def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
         return self.field, ()

-    def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
+    def clause(self) -> tuple[str | None, Sequence[SQLiteType]]:
         if self.fast:
             return self.col_clause()
         else:
@@ -188,7 +179,7 @@ class FieldQuery(Query, Generic[P]):
 class MatchQuery(FieldQuery[AnySQLiteType]):
     """A query that looks for exact matches in a Model field."""

-    def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
+    def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
         return self.field + " = ?", [self.pattern]

     @classmethod
@@ -202,7 +193,7 @@ class NoneQuery(FieldQuery[None]):
     def __init__(self, field, fast: bool = True):
         super().__init__(field, None, fast)

-    def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
+    def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
         return self.field + " IS NULL", ()

     def match(self, obj: Model) -> bool:
@@ -239,7 +230,7 @@ class StringFieldQuery(FieldQuery[P]):
 class StringQuery(StringFieldQuery[str]):
     """A query that matches a whole string in a specific Model field."""

-    def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
+    def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
         search = (
             self.pattern.replace("\\", "\\\\")
             .replace("%", "\\%")
@@ -257,7 +248,7 @@ class StringQuery(StringFieldQuery[str]):
 class SubstringQuery(StringFieldQuery[str]):
     """A query that matches a substring in a specific Model field."""

-    def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
+    def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
         pattern = (
             self.pattern.replace("\\", "\\\\")
             .replace("%", "\\%")
@@ -292,7 +283,7 @@ class RegexpQuery(StringFieldQuery[Pattern[str]]):

         super().__init__(field_name, pattern_re, fast)

-    def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
+    def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
         return f" regexp({self.field}, ?)", [self.pattern.pattern]

     @staticmethod
@@ -303,7 +294,7 @@ class RegexpQuery(StringFieldQuery[Pattern[str]]):
         return unicodedata.normalize("NFC", s)

     @classmethod
-    def string_match(cls, pattern: Pattern, value: str) -> bool:
+    def string_match(cls, pattern: Pattern[str], value: str) -> bool:
         return pattern.search(cls._normalize(value)) is not None


@@ -333,7 +324,7 @@ class BytesQuery(FieldQuery[bytes]):
     `MatchQuery` when matching on BLOB values.
     """

-    def __init__(self, field_name: str, pattern: Union[bytes, str, memoryview]):
+    def __init__(self, field_name: str, pattern: bytes | str | memoryview):
         # Use a buffer/memoryview representation of the pattern for SQLite
         # matching. This instructs SQLite to treat the blob as binary
         # rather than encoded Unicode.
@@ -351,7 +342,7 @@ class BytesQuery(FieldQuery[bytes]):

         super().__init__(field_name, bytes_pattern)

-    def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
+    def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
         return self.field + " = ?", [self.buf_pattern]

     @classmethod
@@ -368,7 +359,7 @@ class NumericQuery(FieldQuery[str]):
     a float.
     """

-    def _convert(self, s: str) -> Union[float, int, None]:
+    def _convert(self, s: str) -> float | int | None:
         """Convert a string to a numeric type (float or int).

         Return None if `s` is empty.
@@ -416,7 +407,7 @@ class NumericQuery(FieldQuery[str]):
             return False
         return True

-    def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
+    def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
         if self.point is not None:
             return self.field + "=?", (self.point,)
         else:
@@ -444,7 +435,7 @@ class InQuery(Generic[AnySQLiteType], FieldQuery[Sequence[AnySQLiteType]]):
     def subvals(self) -> Sequence[SQLiteType]:
         return self.pattern

-    def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
+    def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
         placeholders = ", ".join(["?"] * len(self.subvals))
         return f"{self.field_name} IN ({placeholders})", self.subvals

@@ -461,11 +452,11 @@ class CollectionQuery(Query):
     """

     @property
-    def field_names(self) -> Set[str]:
+    def field_names(self) -> set[str]:
         """Return a set with field names that this query operates on."""
         return reduce(or_, (sq.field_names for sq in self.subqueries))

-    def __init__(self, subqueries: Sequence = ()):
+    def __init__(self, subqueries: Sequence[Query] = ()):
         self.subqueries = subqueries

     # Act like a sequence.
@@ -476,7 +467,7 @@ class CollectionQuery(Query):
     def __getitem__(self, key):
         return self.subqueries[key]

-    def __iter__(self) -> Iterator:
+    def __iter__(self) -> Iterator[Query]:
         return iter(self.subqueries)

     def __contains__(self, subq) -> bool:
@@ -485,12 +476,12 @@ class CollectionQuery(Query):
     def clause_with_joiner(
         self,
         joiner: str,
-    ) -> Tuple[Optional[str], Sequence[SQLiteType]]:
+    ) -> tuple[str | None, Sequence[SQLiteType]]:
         """Return a clause created by joining together the clauses of
         all subqueries with the string joiner (padded by spaces).
         """
         clause_parts = []
-        subvals = []
+        subvals: list[SQLiteType] = []
         for subq in self.subqueries:
             subq_clause, subq_subvals = subq.clause()
             if not subq_clause:
@@ -521,11 +512,11 @@ class AnyFieldQuery(CollectionQuery):
     """

     @property
-    def field_names(self) -> Set[str]:
+    def field_names(self) -> set[str]:
         """Return a set with field names that this query operates on."""
         return set(self.fields)

-    def __init__(self, pattern, fields, cls: Type[FieldQuery]):
+    def __init__(self, pattern, fields, cls: FieldQueryType):
         self.pattern = pattern
         self.fields = fields
         self.query_class = cls
@@ -536,7 +527,7 @@ class AnyFieldQuery(CollectionQuery):
         # TYPING ERROR
         super().__init__(subqueries)

-    def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
+    def clause(self) -> tuple[str | None, Sequence[SQLiteType]]:
         return self.clause_with_joiner("or")

     def match(self, obj: Model) -> bool:
@@ -563,7 +554,7 @@ class MutableCollectionQuery(CollectionQuery):
     query is initialized.
     """

-    subqueries: MutableSequence
+    subqueries: MutableSequence[Query]

     def __setitem__(self, key, value):
         self.subqueries[key] = value
@@ -575,7 +566,7 @@ class MutableCollectionQuery(CollectionQuery):
 class AndQuery(MutableCollectionQuery):
     """A conjunction of a list of other queries."""

-    def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
+    def clause(self) -> tuple[str | None, Sequence[SQLiteType]]:
         return self.clause_with_joiner("and")

     def match(self, obj: Model) -> bool:
@@ -585,7 +576,7 @@ class AndQuery(MutableCollectionQuery):
 class OrQuery(MutableCollectionQuery):
     """A disjunction of a list of other queries."""

-    def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
+    def clause(self) -> tuple[str | None, Sequence[SQLiteType]]:
         return self.clause_with_joiner("or")

     def match(self, obj: Model) -> bool:
@@ -598,14 +589,14 @@ class NotQuery(Query):
     """

     @property
-    def field_names(self) -> Set[str]:
+    def field_names(self) -> set[str]:
         """Return a set with field names that this query operates on."""
         return self.subquery.field_names

     def __init__(self, subquery):
         self.subquery = subquery

-    def clause(self) -> Tuple[Optional[str], Sequence[SQLiteType]]:
+    def clause(self) -> tuple[str | None, Sequence[SQLiteType]]:
         clause, subvals = self.subquery.clause()
         if clause:
             return f"not ({clause})", subvals
@@ -630,7 +621,7 @@ class NotQuery(Query):
 class TrueQuery(Query):
     """A query that always matches."""

-    def clause(self) -> Tuple[str, Sequence[SQLiteType]]:
+    def clause(self) -> tuple[str, Sequence[SQLiteType]]:
         return "1", ()

     def match(self, obj: Model) -> bool:
@@ -640,7 +631,7 @@ class TrueQuery(Query):
 class FalseQuery(Query):
     """A query that never matches."""

-    def clause(self) -> Tuple[str, Sequence[SQLiteType]]:
+    def clause(self) -> tuple[str, Sequence[SQLiteType]]:
         return "0", ()

     def match(self, obj: Model) -> bool:
@@ -650,7 +641,7 @@ class FalseQuery(Query):
 # Time/date queries.


-def _parse_periods(pattern: str) -> Tuple[Optional[Period], Optional[Period]]:
+def _parse_periods(pattern: str) -> tuple[Period | None, Period | None]:
     """Parse a string containing two dates separated by two dots (..).
     Return a pair of `Period` objects.
     """
@@ -696,7 +687,7 @@ class Period:
         self.precision = precision

     @classmethod
-    def parse(cls: Type["Period"], string: str) -> Optional["Period"]:
+    def parse(cls: type[Period], string: str) -> Period | None:
         """Parse a date and return a `Period` object or `None` if the
         string is empty, or raise an InvalidQueryArgumentValueError if
         the string cannot be parsed to a date.
@@ -715,7 +706,7 @@ class Period:

         def find_date_and_format(
             string: str,
-        ) -> Union[Tuple[None, None], Tuple[datetime, int]]:
+        ) -> tuple[None, None] | tuple[datetime, int]:
             for ord, format in enumerate(cls.date_formats):
                 for format_option in format:
                     try:
@@ -729,7 +720,7 @@ class Period:
         if not string:
             return None

-        date: Optional[datetime]
+        date: datetime | None

         # Check for a relative date.
         match_dq = re.match(cls.relative_re, string)
@@ -789,7 +780,7 @@ class DateInterval:
     A right endpoint of None means towards infinity.
     """

-    def __init__(self, start: Optional[datetime], end: Optional[datetime]):
+    def __init__(self, start: datetime | None, end: datetime | None):
         if start is not None and end is not None and not start < end:
             raise ValueError(
                 "start date {} is not before end date {}".format(start, end)
@@ -800,8 +791,8 @@ class DateInterval:
     @classmethod
     def from_periods(
         cls,
-        start: Optional[Period],
-        end: Optional[Period],
+        start: Period | None,
+        end: Period | None,
     ) -> DateInterval:
         """Create an interval with two Periods as the endpoints."""
         end_date = end.open_right_endpoint() if end is not None else None
@@ -843,7 +834,7 @@ class DateQuery(FieldQuery[str]):

     _clause_tmpl = "{0} {1} ?"

-    def col_clause(self) -> Tuple[str, Sequence[SQLiteType]]:
+    def col_clause(self) -> tuple[str, Sequence[SQLiteType]]:
         clause_parts = []
         subvals = []

@@ -875,7 +866,7 @@ class DurationQuery(NumericQuery):
     or M:SS time interval.
     """

-    def _convert(self, s: str) -> Optional[float]:
+    def _convert(self, s: str) -> float | None:
         """Convert a M:SS or numeric string to a float.

         Return None if `s` is empty.
@@ -902,13 +893,13 @@ class Sort:
     the database.
     """

-    def order_clause(self) -> Optional[str]:
+    def order_clause(self) -> str | None:
         """Generates a SQL fragment to be used in a ORDER BY clause, or
         None if no fragment is used (i.e., this is a slow sort).
         """
         return None

-    def sort(self, items: List) -> List:
+    def sort(self, items: list[AnyModel]) -> list[AnyModel]:
         """Sort the list of objects and return a list."""
         return sorted(items)

@@ -931,7 +922,7 @@ class Sort:
 class MultipleSort(Sort):
     """Sort that encapsulates multiple sub-sorts."""

-    def __init__(self, sorts: Optional[List[Sort]] = None):
+    def __init__(self, sorts: list[Sort] | None = None):
         self.sorts = sorts or []

     def add_sort(self, sort: Sort):
@@ -994,7 +985,7 @@ class FieldSort(Sort):

     def __init__(
         self,
-        field,
+        field: str,
         ascending: bool = True,
         case_insensitive: bool = True,
     ):
@@ -1002,13 +993,20 @@ class FieldSort(Sort):
         self.ascending = ascending
         self.case_insensitive = case_insensitive

-    def sort(self, objs: Collection):
+    def sort(self, objs: list[AnyModel]) -> list[AnyModel]:
         # TODO: Conversion and null-detection here. In Python 3,
         # comparisons with None fail. We should also support flexible
         # attributes with different types without falling over.

         def key(obj: Model) -> Any:
-            field_val = obj.get(self.field, "")
+            field_val = obj.get(self.field, None)
+            if field_val is None:
+                if _type := obj._types.get(self.field):
+                    # If the field is typed, use its null value.
+                    field_val = obj._types[self.field].null
+                else:
+                    # If not, fall back to using an empty string.
+                    field_val = ""
             if self.case_insensitive and isinstance(field_val, str):
                 field_val = field_val.lower()
             return field_val
@@ -1040,8 +1038,8 @@ class FixedFieldSort(FieldSort):
         if self.case_insensitive:
             field = (
                 "(CASE "
-                'WHEN TYPEOF({0})="text" THEN LOWER({0}) '
-                'WHEN TYPEOF({0})="blob" THEN LOWER({0}) '
+                "WHEN TYPEOF({0})='text' THEN LOWER({0}) "
+                "WHEN TYPEOF({0})='blob' THEN LOWER({0}) "
                 "ELSE {0} END)".format(self.field)
             )
         else:
@@ -1061,7 +1059,7 @@ class SlowFieldSort(FieldSort):
 class NullSort(Sort):
     """No sorting. Leave results unsorted."""

-    def sort(self, items: List) -> List:
+    def sort(self, items: list[AnyModel]) -> list[AnyModel]:
         return items

     def __nonzero__(self) -> bool:
@@ -1075,3 +1073,23 @@ class NullSort(Sort):

     def __hash__(self) -> int:
         return 0


+class SmartArtistSort(FieldSort):
+    """Sort by artist (either album artist or track artist),
+    prioritizing the sort field over the raw field.
+    """
+
+    def order_clause(self):
+        order = "ASC" if self.ascending else "DESC"
+        collate = "COLLATE NOCASE" if self.case_insensitive else ""
+        field = self.field
+
+        return f"COALESCE(NULLIF({field}_sort, ''), {field}) {collate} {order}"
+
+    def sort(self, objs: list[AnyModel]) -> list[AnyModel]:
+        def key(o):
+            val = o[f"{self.field}_sort"] or o[self.field]
+            return val.lower() if self.case_insensitive else val
+
+        return sorted(objs, key=key, reverse=not self.ascending)
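
The `COALESCE(NULLIF(...))` clause above replaces the old CASE expression: an empty or NULL sort field falls back to the raw field. A standalone sqlite3 sketch of that ordering, with invented rows:

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE items (artist TEXT, artist_sort TEXT)")
    conn.executemany(
        "INSERT INTO items VALUES (?, ?)",
        [("The Beatles", "Beatles, The"), ("Beck", "")],
    )
    # An empty artist_sort falls back to artist, as in order_clause() above.
    rows = conn.execute(
        "SELECT artist FROM items "
        "ORDER BY COALESCE(NULLIF(artist_sort, ''), artist) COLLATE NOCASE ASC"
    ).fetchall()
    print([artist for (artist,) in rows])  # ['The Beatles', 'Beck']
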
@@ -14,12 +14,20 @@

 """Parsing of strings into DBCore queries."""

+from __future__ import annotations
+
 import itertools
 import re
-from typing import Collection, Dict, List, Optional, Sequence, Tuple, Type
+from typing import TYPE_CHECKING

 from . import Model, query
-from .query import Sort

+if TYPE_CHECKING:
+    from collections.abc import Collection, Sequence
+
+    from .query import FieldQueryType, Sort
+
+Prefixes = dict[str, FieldQueryType]

 PARSE_QUERY_PART_REGEX = re.compile(
     # Non-capturing optional segment for the keyword.
@@ -35,10 +43,10 @@ PARSE_QUERY_PART_REGEX = re.compile(

 def parse_query_part(
     part: str,
-    query_classes: Dict[str, Type[query.FieldQuery]] = {},
-    prefixes: Dict = {},
-    default_class: Type[query.SubstringQuery] = query.SubstringQuery,
-) -> Tuple[Optional[str], str, Type[query.FieldQuery], bool]:
+    query_classes: dict[str, FieldQueryType] = {},
+    prefixes: Prefixes = {},
+    default_class: type[query.SubstringQuery] = query.SubstringQuery,
+) -> tuple[str | None, str, FieldQueryType, bool]:
     """Parse a single *query part*, which is a chunk of a complete query
     string representing a single criterion.

@@ -104,8 +112,8 @@ def parse_query_part(


 def construct_query_part(
-    model_cls: Type[Model],
-    prefixes: Dict,
+    model_cls: type[Model],
+    prefixes: Prefixes,
     query_part: str,
 ) -> query.Query:
     """Parse a *query part* string and return a :class:`Query` object.
@@ -127,7 +135,7 @@ def construct_query_part(

     # Use `model_cls` to build up a map from field (or query) names to
     # `Query` classes.
-    query_classes: Dict[str, Type[query.FieldQuery]] = {}
+    query_classes: dict[str, FieldQueryType] = {}
     for k, t in itertools.chain(
         model_cls._fields.items(), model_cls._types.items()
     ):
@@ -152,14 +160,15 @@ def construct_query_part(
     # Field queries get constructed according to the name of the field
     # they are querying.
     else:
-        key = key.lower()
-        if key in model_cls.shared_db_fields:
+        field = table = key.lower()
+        if field in model_cls.shared_db_fields:
             # This field exists in both tables, so SQLite will encounter
             # an OperationalError if we try to query it in a join.
             # Using an explicit table name resolves this.
-            key = f"{model_cls._table}.{key}"
+            table = f"{model_cls._table}.{field}"

-        out_query = query_class(key, pattern, key in model_cls.all_db_fields)
+        field_in_db = field in model_cls.all_db_fields
+        out_query = query_class(table, pattern, field_in_db)

     # Apply negation.
     if negate:
@@ -170,9 +179,9 @@ def construct_query_part(

 # TYPING ERROR
 def query_from_strings(
-    query_cls: Type[query.CollectionQuery],
-    model_cls: Type[Model],
-    prefixes: Dict,
+    query_cls: type[query.CollectionQuery],
+    model_cls: type[Model],
+    prefixes: Prefixes,
     query_parts: Collection[str],
 ) -> query.Query:
     """Creates a collection query of type `query_cls` from a list of
@@ -188,7 +197,7 @@ def query_from_strings(


 def construct_sort_part(
-    model_cls: Type[Model],
+    model_cls: type[Model],
     part: str,
     case_insensitive: bool = True,
 ) -> Sort:
@@ -206,20 +215,20 @@ def construct_sort_part(
     assert direction in ("+", "-"), "part must end with + or -"
     is_ascending = direction == "+"

-    if field in model_cls._sorts:
-        sort = model_cls._sorts[field](
-            model_cls, is_ascending, case_insensitive
-        )
+    if sort_cls := model_cls._sorts.get(field):
+        if issubclass(sort_cls, query.SmartArtistSort):
+            field = "albumartist" if model_cls.__name__ == "Album" else "artist"
     elif field in model_cls._fields:
-        sort = query.FixedFieldSort(field, is_ascending, case_insensitive)
+        sort_cls = query.FixedFieldSort
     else:
         # Flexible or computed.
-        sort = query.SlowFieldSort(field, is_ascending, case_insensitive)
-    return sort
+        sort_cls = query.SlowFieldSort
+
+    return sort_cls(field, is_ascending, case_insensitive)


 def sort_from_strings(
-    model_cls: Type[Model],
+    model_cls: type[Model],
     sort_parts: Sequence[str],
     case_insensitive: bool = True,
 ) -> Sort:
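
The rewrite above turns three branches that each constructed a `Sort` into one dispatch that first picks a class, then instantiates it once at the end. A reduced sketch of that shape, with stand-in classes rather than the real ones:

    # Stand-ins for the real sort classes; only the dispatch shape matters.
    class FixedFieldSort: ...
    class SlowFieldSort: ...
    class SmartArtistSort: ...

    _sorts = {"artist": SmartArtistSort}   # named sort criteria
    _fields = {"title": str}               # fixed (database) fields

    def pick_sort_class(field: str) -> type:
        if sort_cls := _sorts.get(field):  # named criterion wins
            return sort_cls
        elif field in _fields:             # fixed field: fast SQL sort
            return FixedFieldSort
        return SlowFieldSort               # flexible/computed: sort in Python

    assert pick_sort_class("artist") is SmartArtistSort
    assert pick_sort_class("title") is FixedFieldSort
    assert pick_sort_class("added") is SlowFieldSort
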
@@ -238,11 +247,11 @@ def sort_from_strings(


 def parse_sorted_query(
-    model_cls: Type[Model],
-    parts: List[str],
-    prefixes: Dict = {},
+    model_cls: type[Model],
+    parts: list[str],
+    prefixes: Prefixes = {},
     case_insensitive: bool = True,
-) -> Tuple[query.Query, Sort]:
+) -> tuple[query.Query, Sort]:
     """Given a list of strings, create the `Query` and `Sort` that they
     represent.
     """

@@ -12,15 +12,23 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-"""Representation of type information for DBCore model fields.
-"""
+"""Representation of type information for DBCore model fields."""

 from __future__ import annotations

 import typing
 from abc import ABC
-from typing import Any, Generic, List, TypeVar, Union, cast
+from typing import TYPE_CHECKING, Any, Generic, TypeVar, cast

 from beets.util import str2bool

-from .query import BooleanQuery, FieldQuery, NumericQuery, SubstringQuery
+from .query import (
+    BooleanQuery,
+    FieldQueryType,
+    NumericQuery,
+    SQLiteType,
+    SubstringQuery,
+)


 class ModelType(typing.Protocol):
@@ -35,8 +43,12 @@ class ModelType(typing.Protocol):
 # Generic type variables, used for the value type T and null type N (if
 # nullable, else T and N are set to the same type for the concrete subclasses
 # of Type).
-N = TypeVar("N")
-T = TypeVar("T", bound=ModelType)
+if TYPE_CHECKING:
+    N = TypeVar("N", default=Any)
+    T = TypeVar("T", bound=ModelType, default=Any)
+else:
+    N = TypeVar("N")
+    T = TypeVar("T", bound=ModelType)


 class Type(ABC, Generic[T, N]):
@@ -49,11 +61,11 @@ class Type(ABC, Generic[T, N]):
     """The SQLite column type for the value.
     """

-    query: typing.Type[FieldQuery] = SubstringQuery
+    query: FieldQueryType = SubstringQuery
     """The `Query` subclass to be used when querying the field.
     """

-    model_type: typing.Type[T]
+    model_type: type[T]
     """The Python type that is used to represent the value in the model.

     The model is guaranteed to return a value of this type if the field
@@ -69,7 +81,7 @@ class Type(ABC, Generic[T, N]):
         # have a field null_type similar to `model_type` and use that here.
         return cast(N, self.model_type())

-    def format(self, value: Union[N, T]) -> str:
+    def format(self, value: N | T) -> str:
         """Given a value of this type, produce a Unicode string
         representing the value. This is used in template evaluation.
         """
@@ -83,7 +95,7 @@ class Type(ABC, Generic[T, N]):
         else:
             return str(value)

-    def parse(self, string: str) -> Union[T, N]:
+    def parse(self, string: str) -> T | N:
         """Parse a (possibly human-written) string and return the
         indicated value of this type.
         """
@@ -92,7 +104,7 @@ class Type(ABC, Generic[T, N]):
         except ValueError:
             return self.null

-    def normalize(self, value: Any) -> Union[T, N]:
+    def normalize(self, value: Any) -> T | N:
         """Given a value that will be assigned into a field of this
         type, normalize the value to have the appropriate type. This
         base implementation only reinterprets `None`.
@@ -105,10 +117,7 @@ class Type(ABC, Generic[T, N]):
         # `self.model_type(value)`
         return cast(T, value)

-    def from_sql(
-        self,
-        sql_value: Union[None, int, float, str, bytes],
-    ) -> Union[T, N]:
+    def from_sql(self, sql_value: SQLiteType) -> T | N:
         """Receives the value stored in the SQL backend and return the
         value to be stored in the model.

@@ -129,7 +138,7 @@ class Type(ABC, Generic[T, N]):
         else:
             return self.normalize(sql_value)

-    def to_sql(self, model_value: Any) -> Union[None, int, float, str, bytes]:
+    def to_sql(self, model_value: Any) -> SQLiteType:
         """Convert a value as stored in the model object to a value used
         by the database adapter.
         """
@@ -154,7 +163,7 @@ class BaseInteger(Type[int, N]):
     query = NumericQuery
     model_type = int

-    def normalize(self, value: Any) -> Union[int, N]:
+    def normalize(self, value: Any) -> int | N:
         try:
             return self.model_type(round(float(value)))
         except ValueError:
@@ -183,7 +192,7 @@ class BasePaddedInt(BaseInteger[N]):
     def __init__(self, digits: int):
         self.digits = digits

-    def format(self, value: Union[int, N]) -> str:
+    def format(self, value: int | N) -> str:
         return "{0:0{1}d}".format(value or 0, self.digits)


@@ -232,13 +241,13 @@ class BaseFloat(Type[float, N]):
     """

     sql = "REAL"
-    query: typing.Type[FieldQuery[Any]] = NumericQuery
+    query: FieldQueryType = NumericQuery
     model_type = float

     def __init__(self, digits: int = 1):
         self.digits = digits

-    def format(self, value: Union[float, N]) -> str:
+    def format(self, value: float | N) -> str:
         return "{0:.{1}f}".format(value or 0, self.digits)


@@ -264,7 +273,7 @@ class BaseString(Type[T, N]):
     sql = "TEXT"
     query = SubstringQuery

-    def normalize(self, value: Any) -> Union[T, N]:
+    def normalize(self, value: Any) -> T | N:
         if value is None:
             return self.null
         else:
@@ -277,7 +286,7 @@ class String(BaseString[str, Any]):
     model_type = str


-class DelimitedString(BaseString[List[str], List[str]]):
+class DelimitedString(BaseString[list[str], list[str]]):
     """A list of Unicode strings, represented in-database by a single string
     containing delimiter-separated values.
     """
@@ -287,7 +296,7 @@ class DelimitedString(BaseString[List[str], List[str]]):
     def __init__(self, delimiter: str):
         self.delimiter = delimiter

-    def format(self, value: List[str]):
+    def format(self, value: list[str]):
         return self.delimiter.join(value)

     def parse(self, string: str):
@@ -295,7 +304,7 @@ class DelimitedString(BaseString[List[str], List[str]]):
             return []
         return string.split(self.delimiter)

-    def to_sql(self, model_value: List[str]):
+    def to_sql(self, model_value: list[str]):
         return self.delimiter.join(model_value)
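
`DelimitedString` stores a list as one delimiter-joined TEXT value: `format` and `to_sql` join, `parse` splits. The round trip, sketched with an assumed delimiter:

    delimiter = "; "  # assumed delimiter; beets configures this per field
    value = ["rock", "indie"]

    stored = delimiter.join(value)      # format()/to_sql(): "rock; indie"
    parsed = stored.split(delimiter)    # parse(): back to ["rock", "indie"]
    assert parsed == value
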
@@ -60,8 +60,7 @@ HISTORY_KEY = "taghistory"
 # def extend_reimport_fresh_fields_item():
 #     importer.REIMPORT_FRESH_FIELDS_ITEM.extend(['tidal_track_popularity']
 #     )
-REIMPORT_FRESH_FIELDS_ALBUM = ["data_source"]
-REIMPORT_FRESH_FIELDS_ITEM = [
+REIMPORT_FRESH_FIELDS_ALBUM = [
     "data_source",
     "bandcamp_album_id",
     "spotify_album_id",
@@ -69,12 +68,13 @@ REIMPORT_FRESH_FIELDS_ITEM = [
     "beatport_album_id",
     "tidal_album_id",
 ]
+REIMPORT_FRESH_FIELDS_ITEM = list(REIMPORT_FRESH_FIELDS_ALBUM)

 # Global logger.
 log = logging.getLogger("beets")


-class ImportAbort(Exception):
+class ImportAbortError(Exception):
     """Raised when the user aborts the tagging operation."""

     pass
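
`list(...)` matters here: the item list starts as an independent copy of the album list, so extending one (as the commented plugin hook above suggests) cannot mutate the other. A tiny demonstration reusing the field names from this hunk:

    REIMPORT_FRESH_FIELDS_ALBUM = ["data_source", "bandcamp_album_id"]
    REIMPORT_FRESH_FIELDS_ITEM = list(REIMPORT_FRESH_FIELDS_ALBUM)  # copy

    # Extending the item list leaves the album list untouched:
    REIMPORT_FRESH_FIELDS_ITEM.extend(["tidal_track_popularity"])
    assert "tidal_track_popularity" not in REIMPORT_FRESH_FIELDS_ALBUM
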
@@ -360,7 +360,7 @@ class ImportSession:
                 pl.run_parallel(QUEUE_SIZE)
             else:
                 pl.run_sequential()
-        except ImportAbort:
+        except ImportAbortError:
             # User aborted operation. Silently stop.
             pass

@@ -605,7 +605,7 @@ class ImportTask(BaseImportTask):
         """
         items = self.imported_items()
         for field, view in config["import"]["set_fields"].items():
-            value = view.get()
+            value = str(view.get())
             log.debug(
                 "Set field {1}={2} for {0}",
                 displayable_path(self.paths),
@@ -627,8 +627,7 @@ class ImportTask(BaseImportTask):
         self.save_progress()
         if session.config["incremental"] and not (
             # Should we skip recording to incremental list?
-            self.skip
-            and session.config["incremental_skip_later"]
+            self.skip and session.config["incremental_skip_later"]
         ):
             self.save_history()

@@ -815,9 +814,16 @@ class ImportTask(BaseImportTask):
         with lib.transaction():
             self.record_replaced(lib)
             self.remove_replaced(lib)
+
             self.album = lib.add_album(self.imported_items())
-            if "data_source" in self.imported_items()[0]:
-                self.album.data_source = self.imported_items()[0].data_source
+            if self.choice_flag == action.APPLY:
+                # Copy album flexible fields to the DB
+                # TODO: change the flow so we create the `Album` object earlier,
+                # and we can move this into `self.apply_metadata`, just like
+                # is done for tracks.
+                autotag.apply_album_metadata(self.match.info, self.album)
+                self.album.store()
+
             self.reimport_metadata(lib)

     def record_replaced(self, lib):
@@ -940,7 +946,7 @@ class ImportTask(BaseImportTask):
             dup_item.remove()
         log.debug(
             "{0} of {1} items replaced",
-            sum(bool(l) for l in self.replaced_items.values()),
+            sum(bool(v) for v in self.replaced_items.values()),
             len(self.imported_items()),
         )

@@ -1056,7 +1062,7 @@ class SingletonImportTask(ImportTask):
         values, for the singleton item.
         """
         for field, view in config["import"]["set_fields"].items():
-            value = view.get()
+            value = str(view.get())
             log.debug(
                 "Set field {1}={2} for {0}",
                 displayable_path(self.paths),

@@ -12,8 +12,8 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-"""The core data store and collection logic for beets.
-"""
+"""The core data store and collection logic for beets."""

 from __future__ import annotations

 import os
@@ -24,7 +24,9 @@ import sys
 import time
 import unicodedata
 from functools import cached_property
+from pathlib import Path

+import platformdirs
 from mediafile import MediaFile, UnreadableFileError

 import beets
@@ -293,50 +295,6 @@ class DurationType(types.Float):
         return self.null


-# Library-specific sort types.
-
-
-class SmartArtistSort(dbcore.query.Sort):
-    """Sort by artist (either album artist or track artist),
-    prioritizing the sort field over the raw field.
-    """
-
-    def __init__(self, model_cls, ascending=True, case_insensitive=True):
-        self.album = model_cls is Album
-        self.ascending = ascending
-        self.case_insensitive = case_insensitive
-
-    def order_clause(self):
-        order = "ASC" if self.ascending else "DESC"
-        field = "albumartist" if self.album else "artist"
-        collate = "COLLATE NOCASE" if self.case_insensitive else ""
-        return (
-            "(CASE {0}_sort WHEN NULL THEN {0} "
-            'WHEN "" THEN {0} '
-            "ELSE {0}_sort END) {1} {2}"
-        ).format(field, collate, order)
-
-    def sort(self, objs):
-        if self.album:
-
-            def field(a):
-                return a.albumartist_sort or a.albumartist
-
-        else:
-
-            def field(i):
-                return i.artist_sort or i.artist
-
-        if self.case_insensitive:
-
-            def key(x):
-                return field(x).lower()
-
-        else:
-            key = field
-        return sorted(objs, key=key, reverse=not self.ascending)
-
-
 # Special path format key.
 PF_KEY_DEFAULT = "default"

@@ -382,7 +340,7 @@ class WriteError(FileOperationError):
 # Item and Album model classes.


-class LibModel(dbcore.Model):
+class LibModel(dbcore.Model["Library"]):
     """Shared concrete functionality for Items and Albums."""

     # Config key that specifies how an instance should be formatted.
@@ -633,7 +591,7 @@ class Item(LibModel):

     _formatter = FormattedItemMapping

-    _sorts = {"artist": SmartArtistSort}
+    _sorts = {"artist": dbcore.query.SmartArtistSort}

     _queries = {"singleton": SingletonQuery}

@@ -658,6 +616,11 @@ class Item(LibModel):
             f"ON {cls._table}.album_id = {cls._relation._table}.id"
         )

+    @property
+    def filepath(self) -> Path | None:
+        """The path to the item's file as pathlib.Path."""
+        return Path(os.fsdecode(self.path)) if self.path else self.path
+
     @property
     def _cached_album(self):
         """The Album object that this item belongs to, if any, or
@@ -1074,10 +1037,10 @@ class Item(LibModel):
         instead of encoded as a bytestring. basedir can override the library's
         base directory for the destination.
         """
-        self._check_db()
+        db = self._check_db()
         platform = platform or sys.platform
-        basedir = basedir or self._db.directory
-        path_formats = path_formats or self._db.path_formats
+        basedir = basedir or db.directory
+        path_formats = path_formats or db.path_formats
         if replacements is None:
             replacements = self._db.replacements

@@ -1120,7 +1083,7 @@ class Item(LibModel):
         maxlen = beets.config["max_filename_length"].get(int)
         if not maxlen:
             # When zero, try to determine from filesystem.
-            maxlen = util.max_filename_length(self._db.directory)
+            maxlen = util.max_filename_length(db.directory)

         subpath, fellback = util.legalize_path(
             subpath,
@@ -1208,8 +1171,8 @@ class Album(LibModel):
     }

     _sorts = {
-        "albumartist": SmartArtistSort,
-        "artist": SmartArtistSort,
+        "albumartist": dbcore.query.SmartArtistSort,
+        "artist": dbcore.query.SmartArtistSort,
     }

     # List of keys that are set on an album's items.

@@ -1595,18 +1558,20 @@ class Library(dbcore.Database):
     def __init__(
         self,
         path="library.blb",
-        directory="~/Music",
+        directory: str | None = None,
         path_formats=((PF_KEY_DEFAULT, "$artist/$album/$track $title"),),
         replacements=None,
     ):
         timeout = beets.config["timeout"].as_number()
         super().__init__(path, timeout=timeout)

-        self.directory = bytestring_path(normpath(directory))
+        self.directory = normpath(directory or platformdirs.user_music_path())

         self.path_formats = path_formats
         self.replacements = replacements

-        self._memotable = {}  # Used for template substitution performance.
+        # Used for template substitution performance.
+        self._memotable: dict[tuple[str, ...], str] = {}

     # Adding objects to the database.
|
||||
|
||||
|
|
@ -1741,6 +1706,11 @@ class DefaultTemplateFunctions:
|
|||
|
||||
_prefix = "tmpl_"
|
||||
|
||||
@cached_classproperty
|
||||
def _func_names(cls) -> list[str]:
|
||||
"""Names of tmpl_* functions in this class."""
|
||||
return [s for s in dir(cls) if s.startswith(cls._prefix)]
|
||||
|
||||
def __init__(self, item=None, lib=None):
|
||||
"""Parametrize the functions.
|
||||
|
||||
|
|
@ -1772,6 +1742,11 @@ class DefaultTemplateFunctions:
|
|||
"""Convert a string to upper case."""
|
||||
return s.upper()
|
||||
|
||||
@staticmethod
|
||||
def tmpl_capitalize(s):
|
||||
"""Converts to a capitalized string."""
|
||||
return s.capitalize()
|
||||
|
||||
@staticmethod
|
||||
def tmpl_title(s):
|
||||
"""Convert a string to title case."""
|
||||
|
|
@ -2038,11 +2013,3 @@ class DefaultTemplateFunctions:
|
|||
return trueval if trueval else self.item.formatted().get(field)
|
||||
else:
|
||||
return falseval
|
||||
|
||||
|
||||
# Get the name of tmpl_* functions in the above class.
|
||||
DefaultTemplateFunctions._func_names = [
|
||||
s
|
||||
for s in dir(DefaultTemplateFunctions)
|
||||
if s.startswith(DefaultTemplateFunctions._prefix)
|
||||
]
|
||||
|
|
|
|||
|
|
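Note: with the new signature, a `Library` created without an explicit `directory` now falls back to the platform music folder instead of a hard-coded `~/Music`. A minimal sketch of the fallback, assuming `platformdirs` is installed (the printed value is illustrative):

    import platformdirs

    # Mirrors the new default in Library.__init__: an explicit argument
    # wins; otherwise platformdirs resolves the OS music folder.
    directory = None
    resolved = directory or platformdirs.user_music_path()
    print(resolved)  # e.g. PosixPath('/home/user/Music')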
diff --git a/beets/logging.py b/beets/logging.py
@@ -20,10 +20,34 @@ use {}-style formatting and can interpolate keywords arguments to the logging
 calls (`debug`, `info`, etc).
 """
 
 
 import logging
 import threading
 from copy import copy
+from logging import (
+    DEBUG,
+    INFO,
+    NOTSET,
+    WARNING,
+    FileHandler,
+    Filter,
+    Handler,
+    Logger,
+    NullHandler,
+    StreamHandler,
+)
+
+__all__ = [
+    "DEBUG",
+    "INFO",
+    "NOTSET",
+    "WARNING",
+    "FileHandler",
+    "Filter",
+    "Handler",
+    "Logger",
+    "NullHandler",
+    "StreamHandler",
+    "getLogger",
+]
 
 
 def logsafe(val):
@@ -46,7 +70,7 @@ def logsafe(val):
     return val
 
 
-class StrFormatLogger(logging.Logger):
+class StrFormatLogger(Logger):
     """A version of `Logger` that uses `str.format`-style formatting
     instead of %-style formatting and supports keyword arguments.
 
@@ -96,12 +120,12 @@ class StrFormatLogger(logging.Logger):
     )
 
 
-class ThreadLocalLevelLogger(logging.Logger):
+class ThreadLocalLevelLogger(Logger):
     """A version of `Logger` whose level is thread-local instead of shared."""
 
-    def __init__(self, name, level=logging.NOTSET):
+    def __init__(self, name, level=NOTSET):
         self._thread_level = threading.local()
-        self.default_level = logging.NOTSET
+        self.default_level = NOTSET
         super().__init__(name, level)
 
     @property
@@ -128,17 +152,13 @@ class BeetsLogger(ThreadLocalLevelLogger, StrFormatLogger):
     pass
 
 
-my_manager = copy(logging.Logger.manager)
+my_manager = copy(Logger.manager)
 my_manager.loggerClass = BeetsLogger
 
 
-# Act like the stdlib logging module by re-exporting its namespace.
-from logging import *  # noqa
-
-
 # Override the `getLogger` to use our machinery.
 def getLogger(name=None):  # noqa
     if name:
         return my_manager.getLogger(name)
     else:
-        return logging.Logger.root
+        return Logger.root
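The explicit import list above replaces the old `from logging import *` re-export; `getLogger` still returns the custom `BeetsLogger`, which accepts `{}`-style templates and keyword arguments as the module docstring describes. A small usage sketch (the logger name is illustrative):

    from beets import logging

    log = logging.getLogger("beets.demo")
    # str.format-style placeholders with keyword arguments, which the
    # stdlib %-style Logger does not support:
    log.warning("moving {src} to {dest}", src="a.mp3", dest="b.mp3")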
diff --git a/beets/plugins.py b/beets/plugins.py
@@ -14,7 +14,6 @@
 
 """Support for beets plugins."""
 
-
 import abc
 import inspect
 import re
@@ -36,7 +35,7 @@ LASTFM_KEY = "2dc3914abf35f0d9c92d97d8f8e42b43"
 log = logging.getLogger("beets")
 
 
-class PluginConflictException(Exception):
+class PluginConflictError(Exception):
     """Indicates that the services provided by one plugin conflict with
     those of another.
 
@@ -343,7 +342,7 @@ def types(model_cls):
         plugin_types = getattr(plugin, attr_name, {})
         for field in plugin_types:
             if field in types and plugin_types[field] != types[field]:
-                raise PluginConflictException(
+                raise PluginConflictError(
                     "Plugin {} defines flexible field {} "
                     "which has already been defined with "
                     "another type.".format(plugin.name, field)
@@ -447,13 +446,13 @@ def import_stages():
 def _check_conflicts_and_merge(plugin, plugin_funcs, funcs):
     """Check the provided template functions for conflicts and merge into funcs.
 
-    Raises a `PluginConflictException` if a plugin defines template functions
+    Raises a `PluginConflictError` if a plugin defines template functions
     for fields that another plugin has already defined template functions for.
     """
     if plugin_funcs:
         if not plugin_funcs.keys().isdisjoint(funcs.keys()):
             conflicted_fields = ", ".join(plugin_funcs.keys() & funcs.keys())
-            raise PluginConflictException(
+            raise PluginConflictError(
                 f"Plugin {plugin.name} defines template functions for "
                 f"{conflicted_fields} that conflict with another plugin."
             )
@@ -519,7 +518,7 @@ def feat_tokens(for_artist=True):
     feat_words = ["ft", "featuring", "feat", "feat.", "ft."]
     if for_artist:
         feat_words += ["with", "vs", "and", "con", "&"]
-    return r"(?<=\s)(?:{})(?=\s)".format(
+    return r"(?<=[\s(\[])(?:{})(?=\s)".format(
         "|".join(re.escape(x) for x in feat_words)
     )
 
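The widened lookbehind in `feat_tokens` means a featured-artist token is now recognized after an opening parenthesis or bracket, not only after whitespace. A quick check of that behavior (titles are illustrative):

    import re

    from beets.plugins import feat_tokens

    pattern = re.compile(feat_tokens())
    print(bool(pattern.search("Song feat. Guest")))    # True before and after
    print(bool(pattern.search("Song (feat. Guest)")))  # True only with the new pattern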
diff --git a/beets/test/_common.py b/beets/test/_common.py
@@ -15,22 +15,18 @@
 """Some common functionality for beets' test cases."""
 
 import os
-import shutil
 import sys
-import tempfile
-import time
 import unittest
 from contextlib import contextmanager
 
-import beets  # noqa: E402
-import beets.library  # noqa: E402
+import beets
+import beets.library
 
 # Make sure the development versions of the plugins are used
-import beetsplug  # noqa: E402
-from beets import util  # noqa: E402
-from beets import importer, logging  # noqa: E402
-from beets.ui import commands  # noqa: E402
-from beets.util import bytestring_path, syspath  # noqa: E402
+import beetsplug
+from beets import importer, logging, util
+from beets.ui import commands
+from beets.util import syspath
 
 beetsplug.__path__ = [
     os.path.abspath(
@@ -62,24 +58,12 @@ log = logging.getLogger("beets")
 log.propagate = True
 log.setLevel(logging.DEBUG)
 
 # Dummy item creation.
 _item_ident = 0
 
-# OS feature test.
-HAVE_SYMLINK = sys.platform != "win32"
-HAVE_HARDLINK = sys.platform != "win32"
-
-try:
-    import reflink
-
-    HAVE_REFLINK = reflink.supported_at(tempfile.gettempdir())
-except ImportError:
-    HAVE_REFLINK = False
-
-
 def item(lib=None):
     global _item_ident
     _item_ident += 1
     i = beets.library.Item(
         title="the title",
         artist="the artist",
@@ -104,7 +88,6 @@ def item(lib=None):
         comments="the comments",
         bpm=8,
         comp=True,
-        path=f"somepath{_item_ident}",
         length=60.0,
         bitrate=128000,
         format="FLAC",
@@ -121,33 +104,6 @@ def item(lib=None):
     return i
 
 
-_album_ident = 0
-
-
-def album(lib=None):
-    global _item_ident
-    _item_ident += 1
-    i = beets.library.Album(
-        artpath=None,
-        albumartist="some album artist",
-        albumartist_sort="some sort album artist",
-        albumartist_credit="some album artist credit",
-        album="the album",
-        genre="the genre",
-        year=2014,
-        month=2,
-        day=5,
-        tracktotal=0,
-        disctotal=1,
-        comp=False,
-        mb_albumid="someID-1",
-        mb_albumartistid="someID-1",
-    )
-    if lib:
-        lib.add(i)
-    return i
-
-
 # Dummy import session.
 def import_session(lib=None, loghandler=None, paths=[], query=[], cli=False):
     cls = commands.TerminalImportSession if cli else importer.ImportSession
@@ -157,137 +113,35 @@ def import_session(lib=None, loghandler=None, paths=[], query=[], cli=False):
 class Assertions:
     """A mixin with additional unit test assertions."""
 
-    def assertExists(self, path):  # noqa
-        self.assertTrue(
-            os.path.exists(syspath(path)), f"file does not exist: {path!r}"
-        )
+    def assertExists(self, path):
+        assert os.path.exists(syspath(path)), f"file does not exist: {path!r}"
 
-    def assertNotExists(self, path):  # noqa
-        self.assertFalse(
-            os.path.exists(syspath(path)), f"file exists: {path!r}"
-        )
+    def assertNotExists(self, path):
+        assert not os.path.exists(syspath(path)), f"file exists: {path!r}"
 
-    def assertIsFile(self, path):  # noqa
+    def assertIsFile(self, path):
         self.assertExists(path)
-        self.assertTrue(
-            os.path.isfile(syspath(path)),
-            "path exists, but is not a regular file: {!r}".format(path),
-        )
+        assert os.path.isfile(
+            syspath(path)
+        ), "path exists, but is not a regular file: {!r}".format(path)
 
-    def assertIsDir(self, path):  # noqa
+    def assertIsDir(self, path):
         self.assertExists(path)
-        self.assertTrue(
-            os.path.isdir(syspath(path)),
-            "path exists, but is not a directory: {!r}".format(path),
-        )
+        assert os.path.isdir(
+            syspath(path)
+        ), "path exists, but is not a directory: {!r}".format(path)
 
     def assert_equal_path(self, a, b):
         """Check that two paths are equal."""
-        self.assertEqual(
-            util.normpath(a),
-            util.normpath(b),
-            f"paths are not equal: {a!r} and {b!r}",
-        )
+        a_bytes, b_bytes = util.normpath(a), util.normpath(b)
+        assert a_bytes == b_bytes, f"{a_bytes=} != {b_bytes=}"
 
-
-# A test harness for all beets tests.
-# Provides temporary, isolated configuration.
-class TestCase(unittest.TestCase, Assertions):
-    """A unittest.TestCase subclass that saves and restores beets'
-    global configuration. This allows tests to make temporary
-    modifications that will then be automatically removed when the test
-    completes. Also provides some additional assertion methods, a
-    temporary directory, and a DummyIO.
-    """
-
-    def setUp(self):
-        # A "clean" source list including only the defaults.
-        beets.config.sources = []
-        beets.config.read(user=False, defaults=True)
-
-        # Direct paths to a temporary directory. Tests can also use this
-        # temporary directory.
-        self.temp_dir = util.bytestring_path(tempfile.mkdtemp())
-
-        beets.config["statefile"] = os.fsdecode(
-            os.path.join(self.temp_dir, b"state.pickle")
-        )
-        beets.config["library"] = os.fsdecode(
-            os.path.join(self.temp_dir, b"library.db")
-        )
-        beets.config["directory"] = os.fsdecode(
-            os.path.join(self.temp_dir, b"libdir")
-        )
-
-        # Set $HOME, which is used by Confuse to create directories.
-        self._old_home = os.environ.get("HOME")
-        os.environ["HOME"] = os.fsdecode(self.temp_dir)
-
-        # Initialize, but don't install, a DummyIO.
-        self.io = DummyIO()
-
-    def tearDown(self):
-        if os.path.isdir(syspath(self.temp_dir)):
-            shutil.rmtree(syspath(self.temp_dir))
-        if self._old_home is None:
-            del os.environ["HOME"]
-        else:
-            os.environ["HOME"] = self._old_home
-        self.io.restore()
-
-        beets.config.clear()
-        beets.config._materialized = False
-
-
-class LibTestCase(TestCase):
-    """A test case that includes an in-memory library object (`lib`) and
-    an item added to the library (`i`).
-    """
-
-    def setUp(self):
-        super().setUp()
-        self.lib = beets.library.Library(":memory:")
-        self.i = item(self.lib)
-
-    def tearDown(self):
-        self.lib._connection().close()
-        super().tearDown()
-
-
-# Mock timing.
-
-
-class Timecop:
-    """Mocks the timing system (namely time() and sleep()) for testing.
-    Inspired by the Ruby timecop library.
-    """
-
-    def __init__(self):
-        self.now = time.time()
-
-    def time(self):
-        return self.now
-
-    def sleep(self, amount):
-        self.now += amount
-
-    def install(self):
-        self.orig = {
-            "time": time.time,
-            "sleep": time.sleep,
-        }
-        time.time = self.time
-        time.sleep = self.sleep
-
-    def restore(self):
-        time.time = self.orig["time"]
-        time.sleep = self.orig["sleep"]
-
-
 # Mock I/O.
 
 
-class InputException(Exception):
+class InputError(Exception):
     def __init__(self, output=None):
         self.output = output
 
@@ -334,9 +188,9 @@ class DummyIn:
     def readline(self):
         if not self.buf:
             if self.out:
-                raise InputException(self.out.get())
+                raise InputError(self.out.get())
             else:
-                raise InputException()
+                raise InputError()
         self.reads += 1
         return self.buf.pop(0)
 
@@ -388,25 +242,6 @@ class Bag:
         return self.fields.get(key)
 
 
-# Convenience methods for setting up a temporary sandbox directory for tests
-# that need to interact with the filesystem.
-
-
-class TempDirMixin:
-    """Text mixin for creating and deleting a temporary directory."""
-
-    def create_temp_dir(self):
-        """Create a temporary directory and assign it into `self.temp_dir`.
-        Call `remove_temp_dir` later to delete it.
-        """
-        self.temp_dir = bytestring_path(tempfile.mkdtemp())
-
-    def remove_temp_dir(self):
-        """Delete the temporary directory created by `create_temp_dir`."""
-        if os.path.isdir(syspath(self.temp_dir)):
-            shutil.rmtree(syspath(self.temp_dir))
-
-
 # Platform mocking.
 
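Because the assertion helpers now raise plain `AssertionError`s instead of going through `unittest` methods, the mixin also works in pytest-style tests that never subclass `unittest.TestCase`. A hedged usage sketch (the path is illustrative):

    from beets.test._common import Assertions

    class Checks(Assertions):
        pass

    # Raises AssertionError with a readable message if the file is missing.
    Checks().assertExists(b"/tmp/some-file.mp3")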
diff --git a/beets/test/helper.py b/beets/test/helper.py
@@ -20,9 +20,6 @@ information or mock the environment.
 
 - `has_program` checks the presence of a command on the system.
 
-- The `generate_album_info` and `generate_track_info` functions return
-  fixtures to be used when mocking the autotagger.
-
 - The `ImportSessionFixture` allows one to run importer code while
   controlling the interactions through code.
 
@@ -36,19 +33,24 @@ import os.path
 import shutil
 import subprocess
 import sys
+import unittest
 from contextlib import contextmanager
 from enum import Enum
+from functools import cached_property
 from io import StringIO
-from tempfile import mkdtemp, mkstemp
-from typing import ClassVar
+from pathlib import Path
+from tempfile import gettempdir, mkdtemp, mkstemp
+from typing import Any, ClassVar
+from unittest.mock import patch
 
 import responses
 from mediafile import Image, MediaFile
 
 import beets
 import beets.plugins
-from beets import autotag, config, importer, logging, util
+from beets import autotag, importer, logging, util
 from beets.autotag.hooks import AlbumInfo, TrackInfo
+from beets.importer import ImportSession
 from beets.library import Album, Item, Library
 from beets.test import _common
 from beets.ui.commands import TerminalImportSession
@@ -142,16 +144,47 @@ def has_program(cmd, args=["--version"]):
     return True
 
 
-class TestHelper:
+def check_reflink_support(path: str) -> bool:
+    try:
+        import reflink
+    except ImportError:
+        return False
+
+    return reflink.supported_at(path)
+
+
+class ConfigMixin:
+    @cached_property
+    def config(self) -> beets.IncludeLazyConfig:
+        """Base beets configuration for tests."""
+        config = beets.config
+        config.sources = []
+        config.read(user=False, defaults=True)
+
+        config["plugins"] = []
+        config["verbose"] = 1
+        config["ui"]["color"] = False
+        config["threaded"] = False
+        return config
+
+
+NEEDS_REFLINK = unittest.skipUnless(
+    check_reflink_support(gettempdir()), "no reflink support for libdir"
+)
+
+
+class TestHelper(_common.Assertions, ConfigMixin):
     """Helper mixin for high-level cli and plugin tests.
 
     This mixin provides methods to isolate beets' global state provide
     fixtures.
     """
 
-    def setup_beets(self, disk=False):
+    db_on_disk: ClassVar[bool] = False
+
+    # TODO automate teardown through hook registration
+
+    def setup_beets(self):
         """Setup pristine global configuration and library for testing.
 
         Sets ``beets.config`` so we can safely use any functionality
@@ -166,129 +199,40 @@ class TestHelper:
         - ``libdir`` Path to a subfolder of ``temp_dir``, containing the
           library's media files. Same as ``config['directory']``.
 
-        - ``config`` The global configuration used by beets.
-
         - ``lib`` Library instance created with the settings from
           ``config``.
 
         Make sure you call ``teardown_beets()`` afterwards.
         """
         self.create_temp_dir()
-        os.environ["BEETSDIR"] = os.fsdecode(self.temp_dir)
-
-        self.config = beets.config
-        self.config.clear()
-        self.config.read()
-
-        self.config["plugins"] = []
-        self.config["verbose"] = 1
-        self.config["ui"]["color"] = False
-        self.config["threaded"] = False
+        temp_dir_str = os.fsdecode(self.temp_dir)
+        self.env_patcher = patch.dict(
+            "os.environ",
+            {
+                "BEETSDIR": temp_dir_str,
+                "HOME": temp_dir_str,  # used by Confuse to create directories.
+            },
+        )
+        self.env_patcher.start()
 
         self.libdir = os.path.join(self.temp_dir, b"libdir")
         os.mkdir(syspath(self.libdir))
         self.config["directory"] = os.fsdecode(self.libdir)
 
-        if disk:
+        if self.db_on_disk:
             dbpath = util.bytestring_path(self.config["library"].as_filename())
         else:
             dbpath = ":memory:"
         self.lib = Library(dbpath, self.libdir)
 
+        # Initialize, but don't install, a DummyIO.
+        self.io = _common.DummyIO()
+
     def teardown_beets(self):
+        self.env_patcher.stop()
+        self.io.restore()
         self.lib._close()
-        if "BEETSDIR" in os.environ:
-            del os.environ["BEETSDIR"]
         self.remove_temp_dir()
-        self.config.clear()
         beets.config.read(user=False, defaults=True)
 
-    def load_plugins(self, *plugins):
-        """Load and initialize plugins by names.
-
-        Similar setting a list of plugins in the configuration. Make
-        sure you call ``unload_plugins()`` afterwards.
-        """
-        # FIXME this should eventually be handled by a plugin manager
-        beets.config["plugins"] = plugins
-        beets.plugins.load_plugins(plugins)
-        beets.plugins.find_plugins()
-
-        # Take a backup of the original _types and _queries to restore
-        # when unloading.
-        Item._original_types = dict(Item._types)
-        Album._original_types = dict(Album._types)
-        Item._types.update(beets.plugins.types(Item))
-        Album._types.update(beets.plugins.types(Album))
-
-        Item._original_queries = dict(Item._queries)
-        Album._original_queries = dict(Album._queries)
-        Item._queries.update(beets.plugins.named_queries(Item))
-        Album._queries.update(beets.plugins.named_queries(Album))
-
-    def unload_plugins(self):
-        """Unload all plugins and remove the from the configuration."""
-        # FIXME this should eventually be handled by a plugin manager
-        beets.config["plugins"] = []
-        beets.plugins._classes = set()
-        beets.plugins._instances = {}
-        Item._types = Item._original_types
-        Album._types = Album._original_types
-        Item._queries = Item._original_queries
-        Album._queries = Album._original_queries
-
-    def create_importer(self, item_count=1, album_count=1):
-        """Create files to import and return corresponding session.
-
-        Copies the specified number of files to a subdirectory of
-        `self.temp_dir` and creates a `ImportSessionFixture` for this path.
-        """
-        import_dir = os.path.join(self.temp_dir, b"import")
-        if not os.path.isdir(syspath(import_dir)):
-            os.mkdir(syspath(import_dir))
-
-        album_no = 0
-        while album_count:
-            album = util.bytestring_path(f"album {album_no}")
-            album_dir = os.path.join(import_dir, album)
-            if os.path.exists(syspath(album_dir)):
-                album_no += 1
-                continue
-            os.mkdir(syspath(album_dir))
-            album_count -= 1
-
-            track_no = 0
-            album_item_count = item_count
-            while album_item_count:
-                title = f"track {track_no}"
-                src = os.path.join(_common.RSRC, b"full.mp3")
-                title_file = util.bytestring_path(f"{title}.mp3")
-                dest = os.path.join(album_dir, title_file)
-                if os.path.exists(syspath(dest)):
-                    track_no += 1
-                    continue
-                album_item_count -= 1
-                shutil.copy(syspath(src), syspath(dest))
-                mediafile = MediaFile(dest)
-                mediafile.update(
-                    {
-                        "artist": "artist",
-                        "albumartist": "album artist",
-                        "title": title,
-                        "album": album,
-                        "mb_albumid": None,
-                        "mb_trackid": None,
-                    }
-                )
-                mediafile.save()
-
-        config["import"]["quiet"] = True
-        config["import"]["autotag"] = False
-        config["import"]["resume"] = False
-
-        return ImportSessionFixture(
-            self.lib, loghandler=None, query=None, paths=[import_dir]
-        )
-
     # Library fixtures methods
 
@@ -304,16 +248,15 @@ class TestHelper:
 
         The item is attached to the database from `self.lib`.
         """
-        item_count = self._get_item_count()
         values_ = {
             "title": "t\u00eftle {0}",
             "artist": "the \u00e4rtist",
             "album": "the \u00e4lbum",
-            "track": item_count,
+            "track": 1,
             "format": "MP3",
         }
         values_.update(values)
-        values_["title"] = values_["title"].format(item_count)
+        values_["title"] = values_["title"].format(1)
         values_["db"] = self.lib
         item = Item(**values_)
         if "path" not in values:
@@ -430,12 +373,6 @@ class TestHelper:
 
         return path
 
-    def _get_item_count(self):
-        if not hasattr(self, "__item_count"):
-            count = 0
-            self.__item_count = count + 1
-        return count
-
     # Running beets commands
 
     def run_command(self, *args, **kwargs):
@@ -457,11 +394,11 @@ class TestHelper:
 
     # Safe file operations
 
-    def create_temp_dir(self):
+    def create_temp_dir(self, **kwargs):
         """Create a temporary directory and assign it into
         `self.temp_dir`. Call `remove_temp_dir` later to delete it.
         """
-        temp_dir = mkdtemp()
+        temp_dir = mkdtemp(**kwargs)
         self.temp_dir = util.bytestring_path(temp_dir)
 
     def remove_temp_dir(self):
@@ -490,99 +427,212 @@ class TestHelper:
         return path
 
 
+# A test harness for all beets tests.
+# Provides temporary, isolated configuration.
+class BeetsTestCase(unittest.TestCase, TestHelper):
+    """A unittest.TestCase subclass that saves and restores beets'
+    global configuration. This allows tests to make temporary
+    modifications that will then be automatically removed when the test
+    completes. Also provides some additional assertion methods, a
+    temporary directory, and a DummyIO.
+    """
+
+    def setUp(self):
+        self.setup_beets()
+
+    def tearDown(self):
+        self.teardown_beets()
+
+
+class ItemInDBTestCase(BeetsTestCase):
+    """A test case that includes an in-memory library object (`lib`) and
+    an item added to the library (`i`).
+    """
+
+    def setUp(self):
+        super().setUp()
+        self.i = _common.item(self.lib)
+
+
+class PluginMixin(ConfigMixin):
+    plugin: ClassVar[str]
+    preload_plugin: ClassVar[bool] = True
+
+    def setup_beets(self):
+        super().setup_beets()
+        if self.preload_plugin:
+            self.load_plugins()
+
+    def teardown_beets(self):
+        super().teardown_beets()
+        self.unload_plugins()
+
+    def load_plugins(self, *plugins: str) -> None:
+        """Load and initialize plugins by names.
+
+        Similar setting a list of plugins in the configuration. Make
+        sure you call ``unload_plugins()`` afterwards.
+        """
+        # FIXME this should eventually be handled by a plugin manager
+        plugins = (self.plugin,) if hasattr(self, "plugin") else plugins
+        self.config["plugins"] = plugins
+        beets.plugins.load_plugins(plugins)
+        beets.plugins.find_plugins()
+
+        # Take a backup of the original _types and _queries to restore
+        # when unloading.
+        Item._original_types = dict(Item._types)
+        Album._original_types = dict(Album._types)
+        Item._types.update(beets.plugins.types(Item))
+        Album._types.update(beets.plugins.types(Album))
+
+        Item._original_queries = dict(Item._queries)
+        Album._original_queries = dict(Album._queries)
+        Item._queries.update(beets.plugins.named_queries(Item))
+        Album._queries.update(beets.plugins.named_queries(Album))
+
+    def unload_plugins(self) -> None:
+        """Unload all plugins and remove them from the configuration."""
+        # FIXME this should eventually be handled by a plugin manager
+        for plugin_class in beets.plugins._instances:
+            plugin_class.listeners = None
+        self.config["plugins"] = []
+        beets.plugins._classes = set()
+        beets.plugins._instances = {}
+        Item._types = getattr(Item, "_original_types", {})
+        Album._types = getattr(Album, "_original_types", {})
+        Item._queries = getattr(Item, "_original_queries", {})
+        Album._queries = getattr(Album, "_original_queries", {})
+
+    @contextmanager
+    def configure_plugin(self, config: Any):
+        self.config[self.plugin].set(config)
+        self.load_plugins(self.plugin)
+
+        yield
+
+        self.unload_plugins()
+
+
+class PluginTestCase(PluginMixin, BeetsTestCase):
+    pass
+
+
 class ImportHelper(TestHelper):
     """Provides tools to setup a library, a directory containing files that are
     to be imported and an import session. The class also provides stubs for the
     autotagging library and several assertions for the library.
     """
 
-    def setup_beets(self, disk=False):
-        super().setup_beets(disk)
+    resource_path = syspath(os.path.join(_common.RSRC, b"full.mp3"))
+
+    default_import_config = {
+        "autotag": True,
+        "copy": True,
+        "hardlink": False,
+        "link": False,
+        "move": False,
+        "resume": False,
+        "singletons": False,
+        "timid": True,
+    }
+
+    lib: Library
+    importer: ImportSession
+
+    @cached_property
+    def import_path(self) -> Path:
+        import_path = Path(os.fsdecode(self.temp_dir)) / "import"
+        import_path.mkdir(exist_ok=True)
+        return import_path
+
+    @cached_property
+    def import_dir(self) -> bytes:
+        return bytestring_path(self.import_path)
+
+    def setUp(self):
+        super().setUp()
+        self.import_media = []
+        self.lib.path_formats = [
+            ("default", os.path.join("$artist", "$album", "$title")),
+            ("singleton:true", os.path.join("singletons", "$title")),
+            ("comp:true", os.path.join("compilations", "$album", "$title")),
+        ]
 
-    def _create_import_dir(self, count=3):
-        """Creates a directory with media files to import.
-        Sets ``self.import_dir`` to the path of the directory. Also sets
-        ``self.import_media`` to a list :class:`MediaFile` for all the files in
-        the directory.
-
-        The directory has following layout
-          the_album/
-            track_1.mp3
-            track_2.mp3
-            track_3.mp3
-
-        :param count: Number of files to create
-        """
-        self.import_dir = os.path.join(self.temp_dir, b"testsrcdir")
-        if os.path.isdir(syspath(self.import_dir)):
-            shutil.rmtree(syspath(self.import_dir))
-
-        album_path = os.path.join(self.import_dir, b"the_album")
-        os.makedirs(syspath(album_path))
-
-        resource_path = os.path.join(_common.RSRC, b"full.mp3")
-
-        metadata = {
-            "artist": "Tag Artist",
-            "album": "Tag Album",
-            "albumartist": None,
-            "mb_trackid": None,
-            "mb_albumid": None,
-            "comp": None,
-        }
-        self.media_files = []
-        for i in range(count):
-            # Copy files
-            medium_path = os.path.join(
-                album_path, bytestring_path("track_%d.mp3" % (i + 1))
-            )
-            shutil.copy(syspath(resource_path), syspath(medium_path))
-            medium = MediaFile(medium_path)
-
-            # Set metadata
-            metadata["track"] = i + 1
-            metadata["title"] = "Tag Title %d" % (i + 1)
-            for attr in metadata:
-                setattr(medium, attr, metadata[attr])
-            medium.save()
-            self.media_files.append(medium)
-        self.import_media = self.media_files
+    def prepare_track_for_import(
+        self,
+        track_id: int,
+        album_path: Path,
+        album_id: int | None = None,
+    ) -> Path:
+        track_path = album_path / f"track_{track_id}.mp3"
+        shutil.copy(self.resource_path, track_path)
+        medium = MediaFile(track_path)
+        medium.update(
+            {
+                "album": "Tag Album" + (f" {album_id}" if album_id else ""),
+                "albumartist": None,
+                "mb_albumid": None,
+                "comp": None,
+                "artist": "Tag Artist",
+                "title": f"Tag Track {track_id}",
+                "track": track_id,
+                "mb_trackid": None,
+            }
+        )
+        medium.save()
+        self.import_media.append(medium)
+        return track_path
+
+    def prepare_album_for_import(
+        self,
+        item_count: int,
+        album_id: int | None = None,
+        album_path: Path | None = None,
+    ) -> list[Path]:
+        """Create an album directory with media files to import.
+
+        The directory has following layout
+          album/
+            track_1.mp3
+            track_2.mp3
+            track_3.mp3
+        """
+        if not album_path:
+            album_dir = f"album_{album_id}" if album_id else "album"
+            album_path = self.import_path / album_dir
+
+        album_path.mkdir(exist_ok=True)
+
+        return [
+            self.prepare_track_for_import(tid, album_path, album_id=album_id)
+            for tid in range(1, item_count + 1)
+        ]
+
+    def prepare_albums_for_import(self, count: int = 1) -> None:
+        album_dirs = Path(os.fsdecode(self.import_dir)).glob("album_*")
+        base_idx = int(str(max(album_dirs, default="0")).split("_")[-1]) + 1
+
+        for album_id in range(base_idx, count + base_idx):
+            self.prepare_album_for_import(1, album_id=album_id)
 
-    def _setup_import_session(
-        self,
-        import_dir=None,
-        delete=False,
-        threaded=False,
-        copy=True,
-        singletons=False,
-        move=False,
-        autotag=True,
-        link=False,
-        hardlink=False,
-    ):
-        config["import"]["copy"] = copy
-        config["import"]["delete"] = delete
-        config["import"]["timid"] = True
-        config["threaded"] = False
-        config["import"]["singletons"] = singletons
-        config["import"]["move"] = move
-        config["import"]["autotag"] = autotag
-        config["import"]["resume"] = False
-        config["import"]["link"] = link
-        config["import"]["hardlink"] = hardlink
-
-        self.importer = ImportSessionFixture(
+    def _get_import_session(self, import_dir: bytes) -> ImportSession:
+        return ImportSessionFixture(
             self.lib,
             loghandler=None,
             query=None,
-            paths=[import_dir or self.import_dir],
+            paths=[import_dir],
         )
 
+    def setup_importer(
+        self, import_dir: bytes | None = None, **kwargs
+    ) -> ImportSession:
+        self.config["import"].set_args({**self.default_import_config, **kwargs})
+        self.importer = self._get_import_session(import_dir or self.import_dir)
+        return self.importer
+
+    def setup_singleton_importer(self, **kwargs) -> ImportSession:
+        return self.setup_importer(singletons=True, **kwargs)
+
     def assert_file_in_lib(self, *segments):
         """Join the ``segments`` and assert that this path exists in the
         library directory.
@@ -596,10 +646,25 @@ class ImportHelper(TestHelper):
         self.assertNotExists(os.path.join(self.libdir, *segments))
 
     def assert_lib_dir_empty(self):
-        self.assertEqual(len(os.listdir(syspath(self.libdir))), 0)
+        assert not os.listdir(syspath(self.libdir))
 
 
-class ImportSessionFixture(importer.ImportSession):
+class AsIsImporterMixin:
+    def setUp(self):
+        super().setUp()
+        self.prepare_album_for_import(1)
+
+    def run_asis_importer(self, **kwargs):
+        importer = self.setup_importer(autotag=False, **kwargs)
+        importer.run()
+        return importer
+
+
+class ImportTestCase(ImportHelper, BeetsTestCase):
+    pass
+
+
+class ImportSessionFixture(ImportSession):
     """ImportSession that can be controlled programaticaly.
 
     >>> lib = Library(':memory:')
@@ -646,10 +711,6 @@ class ImportSessionFixture(importer.ImportSession):
 
     default_resolution = "REMOVE"
 
-    def add_resolution(self, resolution):
-        assert isinstance(resolution, self.Resolution)
-        self._resolutions.append(resolution)
-
     def resolve_duplicate(self, task, found_duplicates):
         try:
             res = self._resolutions.pop(0)
@@ -702,124 +763,28 @@ class TerminalImportSessionFixture(TerminalImportSession):
             self.io.addinput("T")
         elif choice == importer.action.SKIP:
             self.io.addinput("S")
-        elif isinstance(choice, int):
+        else:
             self.io.addinput("M")
             self.io.addinput(str(choice))
             self._add_choice_input()
-        else:
-            raise Exception("Unknown choice %s" % choice)
 
 
-class TerminalImportSessionSetup:
-    """Overwrites ImportHelper._setup_import_session to provide a terminal importer"""
+class TerminalImportMixin(ImportHelper):
+    """Provides a terminal importer for the import session."""
 
-    def _setup_import_session(
-        self,
-        import_dir=None,
-        delete=False,
-        threaded=False,
-        copy=True,
-        singletons=False,
-        move=False,
-        autotag=True,
-    ):
-        config["import"]["copy"] = copy
-        config["import"]["delete"] = delete
-        config["import"]["timid"] = True
-        config["threaded"] = False
-        config["import"]["singletons"] = singletons
-        config["import"]["move"] = move
-        config["import"]["autotag"] = autotag
-        config["import"]["resume"] = False
+    io: _common.DummyIO
 
-        if not hasattr(self, "io"):
-            self.io = _common.DummyIO()
+    def _get_import_session(self, import_dir: bytes) -> importer.ImportSession:
         self.io.install()
-        self.importer = TerminalImportSessionFixture(
+        return TerminalImportSessionFixture(
             self.lib,
             loghandler=None,
             query=None,
             io=self.io,
-            paths=[import_dir or self.import_dir],
+            paths=[import_dir],
         )
 
 
-def generate_album_info(album_id, track_values):
-    """Return `AlbumInfo` populated with mock data.
-
-    Sets the album info's `album_id` field is set to the corresponding
-    argument. For each pair (`id`, `values`) in `track_values` the `TrackInfo`
-    from `generate_track_info` is added to the album info's `tracks` field.
-    Most other fields of the album and track info are set to "album
-    info" and "track info", respectively.
-    """
-    tracks = [generate_track_info(id, values) for id, values in track_values]
-    album = AlbumInfo(
-        album_id="album info",
-        album="album info",
-        artist="album info",
-        artist_id="album info",
-        tracks=tracks,
-    )
-    for field in ALBUM_INFO_FIELDS:
-        setattr(album, field, "album info")
-
-    return album
-
-
-ALBUM_INFO_FIELDS = [
-    "album",
-    "album_id",
-    "artist",
-    "artist_id",
-    "asin",
-    "albumtype",
-    "va",
-    "label",
-    "barcode",
-    "artist_sort",
-    "releasegroup_id",
-    "catalognum",
-    "language",
-    "country",
-    "albumstatus",
-    "media",
-    "albumdisambig",
-    "releasegroupdisambig",
-    "artist_credit",
-    "data_source",
-    "data_url",
-]
-
-
-def generate_track_info(track_id="track info", values={}):
-    """Return `TrackInfo` populated with mock data.
-
-    The `track_id` field is set to the corresponding argument. All other
-    string fields are set to "track info".
-    """
-    track = TrackInfo(
-        title="track info",
-        track_id=track_id,
-    )
-    for field in TRACK_INFO_FIELDS:
-        setattr(track, field, "track info")
-    for field, value in values.items():
-        setattr(track, field, value)
-    return track
-
-
-TRACK_INFO_FIELDS = [
-    "artist",
-    "artist_id",
-    "artist_sort",
-    "disctitle",
-    "artist_credit",
-    "data_source",
-    "data_url",
-]
-
-
 class AutotagStub:
     """Stub out MusicBrainz album and track matcher and control what the
     autotagger returns.
@@ -888,7 +853,7 @@ class AutotagStub:
 
     def _make_track_match(self, artist, album, number):
         return TrackInfo(
-            title="Applied Title %d" % number,
+            title="Applied Track %d" % number,
             track_id="match %d" % number,
             artist=artist,
             length=1,
@@ -919,6 +884,7 @@ class AutotagStub:
             artist_id="artistid" + id,
             albumtype="soundtrack",
             data_source="match_source",
+            bandcamp_album_id="bc_url",
         )
 
 
@@ -932,7 +898,7 @@ class FetchImageHelper:
         super().run(*args, **kwargs)
 
     IMAGEHEADER = {
-        "image/jpeg": b"\x00" * 6 + b"JFIF",
+        "image/jpeg": b"\xff\xd8\xff" + b"\x00" * 3 + b"JFIF",
         "image/png": b"\211PNG\r\n\032\n",
     }
 
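The new `PluginMixin.configure_plugin` context manager bundles configuration, loading, and unloading into one step. A hedged usage sketch, with the real `zero` plugin standing in for any plugin under test (the config shape is illustrative):

    from beets.test.helper import PluginTestCase

    class ZeroPluginTest(PluginTestCase):
        plugin = "zero"
        preload_plugin = False  # load explicitly inside the test instead

        def test_with_config(self):
            with self.configure_plugin({"fields": ["comments"]}):
                # The plugin is loaded with this config only inside the
                # with-block and unloaded again on exit.
                ...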
diff --git a/beets/ui/__init__.py b/beets/ui/__init__.py
@@ -17,7 +17,6 @@ interface. To invoke the CLI, just call beets.ui.main(). The actual
 CLI commands are implemented in the ui.commands module.
 """
 
-
 import errno
 import optparse
 import os.path
@@ -28,7 +27,7 @@ import sys
 import textwrap
 import traceback
 from difflib import SequenceMatcher
-from typing import Any, Callable, List
+from typing import Any, Callable
 
 import confuse
 
@@ -318,7 +317,7 @@ def input_options(
 
     # Wrap the query text.
     # Start prompt with U+279C: Heavy Round-Tipped Rightwards Arrow
-    prompt = colorize("action", "\u279C ")
+    prompt = colorize("action", "\u279c ")
     line_length = 0
     for i, (part, length) in enumerate(
         zip(prompt_parts, prompt_part_lengths)
@@ -387,7 +386,7 @@ def input_yn(prompt, require=False):
     "yes" unless `require` is `True`, in which case there is no default.
     """
     # Start prompt with U+279C: Heavy Round-Tipped Rightwards Arrow
-    yesno = colorize("action", "\u279C ") + colorize(
+    yesno = colorize("action", "\u279c ") + colorize(
         "action_description", "Enter Y or N:"
     )
     sel = input_options(("y", "n"), require, prompt, yesno)
@@ -1451,7 +1450,7 @@ class Subcommand:
     invoked by a SubcommandOptionParser.
     """
 
-    func: Callable[[library.Library, optparse.Values, List[str]], Any]
+    func: Callable[[library.Library, optparse.Values, list[str]], Any]
 
     def __init__(self, name, parser=None, help="", aliases=(), hide=False):
         """Creates a new subcommand. name is the primary way to invoke
@@ -1497,9 +1496,7 @@ class SubcommandsOptionParser(CommonOptionsParser):
         """
         # A more helpful default usage.
         if "usage" not in kwargs:
-            kwargs[
-                "usage"
-            ] = """
+            kwargs["usage"] = """
 %prog COMMAND [ARGS...]
 %prog help COMMAND"""
         kwargs["add_help_option"] = False
@@ -1861,13 +1858,21 @@ def main(args=None):
     """Run the main command-line interface for beets. Includes top-level
     exception handlers that print friendly error messages.
     """
+    if "AppData\\Local\\Microsoft\\WindowsApps" in sys.exec_prefix:
+        log.error(
+            "error: beets is unable to use the Microsoft Store version of "
+            "Python. Please install Python from https://python.org.\n"
+            "error: More details can be found here "
+            "https://beets.readthedocs.io/en/stable/guides/main.html"
+        )
+        sys.exit(1)
     try:
         _raw_main(args)
     except UserError as exc:
         message = exc.args[0] if exc.args else None
         log.error("error: {0}", message)
         sys.exit(1)
-    except util.HumanReadableException as exc:
+    except util.HumanReadableError as exc:
         exc.log(log)
         sys.exit(1)
     except library.FileOperationError as exc:
diff --git a/beets/ui/commands.py b/beets/ui/commands.py
@@ -16,13 +16,13 @@
 interface.
 """
 
-
 import os
 import re
-from collections import Counter, namedtuple
+from collections import Counter
+from collections.abc import Sequence
 from itertools import chain
 from platform import python_version
-from typing import Sequence
+from typing import Any, NamedTuple
 
 import beets
 from beets import autotag, config, importer, library, logging, plugins, ui, util
@@ -47,7 +46,6 @@ from beets.util import (
 from . import _store_dict
 
 VARIOUS_ARTISTS = "Various Artists"
-PromptChoice = namedtuple("PromptChoice", ["short", "long", "callback"])
 
 # Global logger.
 log = logging.getLogger("beets")
@@ -664,8 +663,8 @@ class AlbumChange(ChangeRepresentation):
         suggests for them.
         """
         # Tracks.
-        # match is an AlbumMatch named tuple, mapping is a dict
-        # Sort the pairs by the track_info index (at index 1 of the namedtuple)
+        # match is an AlbumMatch NamedTuple, mapping is a dict
+        # Sort the pairs by the track_info index (at index 1 of the NamedTuple)
         pairs = list(self.match.mapping.items())
         pairs.sort(key=lambda item_and_track_info: item_and_track_info[1].index)
         # Build up LHS and RHS for track difference display. The `lines` list
@@ -840,6 +839,12 @@ def _summary_judgment(rec):
     return action
 
 
+class PromptChoice(NamedTuple):
+    short: str
+    long: str
+    callback: Any
+
+
 def choose_candidate(
     candidates,
     singleton,
@@ -1022,7 +1027,7 @@ def manual_id(session, task):
 
 def abort_action(session, task):
     """A prompt choice callback that aborts the importer."""
-    raise importer.ImportAbort()
+    raise importer.ImportAbortError()
 
 
 class TerminalImportSession(importer.ImportSession):
@@ -1052,7 +1057,7 @@ class TerminalImportSession(importer.ImportSession):
         if len(actions) == 1:
             return actions[0]
         elif len(actions) > 1:
-            raise plugins.PluginConflictException(
+            raise plugins.PluginConflictError(
                 "Only one handler for `import_task_before_choice` may return "
                 "an action."
             )
@@ -1312,8 +1317,7 @@ def import_files(lib, paths, query):
             loghandler = logging.FileHandler(logpath, encoding="utf-8")
         except OSError:
             raise ui.UserError(
-                "could not open log file for writing: "
-                "{}".format(displayable_path(logpath))
+                f"Could not open log file for writing: {displayable_path(logpath)}"
             )
     else:
         loghandler = None
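`PromptChoice` keeps its construction and attribute access when moving from a `namedtuple` factory to a typed `NamedTuple` class, so existing plugin callbacks keep working unchanged. A minimal sketch (the choice values are illustrative):

    from beets.ui.commands import PromptChoice

    def callback(session, task):
        return None  # fall back to the regular prompt

    choice = PromptChoice("p", "Print tracks", callback)
    print(choice.short, choice.long)  # -> p Print tracks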
diff --git a/beets/util/__init__.py b/beets/util/__init__.py
@@ -13,6 +13,7 @@
 # included in all copies or substantial portions of the Software.
 
 """Miscellaneous utility functions."""
+
 from __future__ import annotations
 
 import errno
@@ -26,45 +27,47 @@ import subprocess
 import sys
 import tempfile
 import traceback
-from collections import Counter, namedtuple
+from collections import Counter
 from contextlib import suppress
 from enum import Enum
-from logging import Logger
+from importlib import import_module
 from multiprocessing.pool import ThreadPool
 from pathlib import Path
+from re import Pattern
 from typing import (
     TYPE_CHECKING,
     Any,
     AnyStr,
     Callable,
-    Generator,
-    Iterable,
-    List,
-    MutableSequence,
-    Optional,
-    Pattern,
-    Sequence,
-    Tuple,
+    NamedTuple,
     TypeVar,
     Union,
 )
 
-from unidecode import unidecode
-
-from beets.util import hidden
-
+if TYPE_CHECKING:
+    from collections.abc import Iterator, Sequence
+    from logging import Logger
+
 if sys.version_info >= (3, 10):
     from typing import TypeAlias
 else:
     from typing_extensions import TypeAlias
 
+from unidecode import unidecode
+
+from beets.util import hidden
+
 MAX_FILENAME_LENGTH = 200
 WINDOWS_MAGIC_PREFIX = "\\\\?\\"
 T = TypeVar("T")
-Bytes_or_String: TypeAlias = Union[str, bytes]
-PathLike = Union[str, bytes, Path]
+BytesOrStr = Union[str, bytes]
+PathLike = Union[BytesOrStr, Path]
+Replacements: TypeAlias = "Sequence[tuple[Pattern[str], str]]"
 
 
-class HumanReadableException(Exception):
+class HumanReadableError(Exception):
     """An Exception that can include a human-readable error message to
     be logged without a traceback. Can preserve a traceback for
     debugging purposes as well.
@@ -120,7 +123,7 @@ class HumanReadableException(Exception):
         logger.error("{0}: {1}", self.error_kind, self.args[0])
 
 
-class FilesystemError(HumanReadableException):
+class FilesystemError(HumanReadableError):
     """An error that occurred while performing a filesystem manipulation
     via a function in this module. The `paths` field is a sequence of
     pathnames involved in the operation.
@@ -161,16 +164,16 @@ class MoveOperation(Enum):
     REFLINK_AUTO = 5
 
 
-def normpath(path: bytes) -> bytes:
+def normpath(path: PathLike) -> bytes:
     """Provide the canonical form of the path suitable for storing in
     the database.
     """
-    path = syspath(path, prefix=False)
-    path = os.path.normpath(os.path.abspath(os.path.expanduser(path)))
-    return bytestring_path(path)
+    str_path = syspath(path, prefix=False)
+    str_path = os.path.normpath(os.path.abspath(os.path.expanduser(str_path)))
+    return bytestring_path(str_path)
 
 
-def ancestry(path: bytes) -> List[str]:
+def ancestry(path: AnyStr) -> list[AnyStr]:
     """Return a list consisting of path's parent directory, its
     grandparent, and so on. For instance:
 
@@ -179,7 +182,7 @@ def ancestry(path: bytes) -> List[str]:
 
     The argument should *not* be the result of a call to `syspath`.
     """
-    out = []
+    out: list[AnyStr] = []
     last_path = None
     while path:
         path = os.path.dirname(path)
@@ -196,34 +199,34 @@ def ancestry(path: bytes) -> List[str]:
 
 def sorted_walk(
     path: AnyStr,
-    ignore: Sequence = (),
+    ignore: Sequence[bytes] = (),
     ignore_hidden: bool = False,
-    logger: Optional[Logger] = None,
-) -> Generator[Tuple, None, None]:
+    logger: Logger | None = None,
+) -> Iterator[tuple[bytes, Sequence[bytes], Sequence[bytes]]]:
     """Like `os.walk`, but yields things in case-insensitive sorted,
     breadth-first order. Directory and file names matching any glob
     pattern in `ignore` are skipped. If `logger` is provided, then
     warning messages are logged there when a directory cannot be listed.
     """
     # Make sure the paths aren't Unicode strings.
-    path = bytestring_path(path)
+    bytes_path = bytestring_path(path)
     ignore = [bytestring_path(i) for i in ignore]
 
     # Get all the directories and files at this level.
     try:
-        contents = os.listdir(syspath(path))
+        contents = os.listdir(syspath(bytes_path))
     except OSError as exc:
         if logger:
             logger.warning(
                 "could not list directory {}: {}".format(
-                    displayable_path(path), exc.strerror
+                    displayable_path(bytes_path), exc.strerror
                 )
             )
         return
     dirs = []
     files = []
-    for base in contents:
-        base = bytestring_path(base)
+    for str_base in contents:
+        base = bytestring_path(str_base)
 
         # Skip ignored filenames.
         skip = False
@@ -231,7 +234,7 @@ def sorted_walk(
             if fnmatch.fnmatch(base, pat):
                 if logger:
                     logger.debug(
-                        "ignoring {} due to ignore rule {}".format(base, pat)
+                        "ignoring '{}' due to ignore rule '{}'", base, pat
                     )
                 skip = True
                 break
@@ -239,7 +242,7 @@ def sorted_walk(
             continue
 
         # Add to output as either a file or a directory.
-        cur = os.path.join(path, base)
+        cur = os.path.join(bytes_path, base)
         if (ignore_hidden and not hidden.is_hidden(cur)) or not ignore_hidden:
             if os.path.isdir(syspath(cur)):
                 dirs.append(base)
@@ -249,12 +252,11 @@ def sorted_walk(
     # Sort lists (case-insensitive) and yield the current level.
     dirs.sort(key=bytes.lower)
     files.sort(key=bytes.lower)
-    yield (path, dirs, files)
+    yield (bytes_path, dirs, files)
 
     # Recurse into directories.
     for base in dirs:
-        cur = os.path.join(path, base)
-        # yield from sorted_walk(...)
+        cur = os.path.join(bytes_path, base)
         yield from sorted_walk(cur, ignore, ignore_hidden, logger)
 
 
@@ -295,8 +297,8 @@ def fnmatch_all(names: Sequence[bytes], patterns: Sequence[bytes]) -> bool:
 
 
 def prune_dirs(
-    path: str,
-    root: Optional[Bytes_or_String] = None,
+    path: bytes,
+    root: bytes | None = None,
     clutter: Sequence[str] = (".DS_Store", "Thumbs.db"),
 ):
     """If path is an empty directory, then remove it. Recursively remove
@@ -307,41 +309,41 @@ def prune_dirs(
     (i.e., no recursive removal).
     """
     path = normpath(path)
-    if root is not None:
-        root = normpath(root)
-
+    root = normpath(root) if root else None
     ancestors = ancestry(path)
+
     if root is None:
         # Only remove the top directory.
         ancestors = []
     elif root in ancestors:
-        # Only remove directories below the root.
+        # Only remove directories below the root_bytes.
         ancestors = ancestors[ancestors.index(root) + 1 :]
     else:
         # Remove nothing.
         return
 
+    bytes_clutter = [bytestring_path(c) for c in clutter]
+
     # Traverse upward from path.
     ancestors.append(path)
     ancestors.reverse()
     for directory in ancestors:
-        directory = syspath(directory)
+        str_directory = syspath(directory)
         if not os.path.exists(directory):
             # Directory gone already.
             continue
-        clutter: List[bytes] = [bytestring_path(c) for c in clutter]
-        match_paths = [bytestring_path(d) for d in os.listdir(directory)]
+        match_paths = [bytestring_path(d) for d in os.listdir(str_directory)]
         try:
-            if fnmatch_all(match_paths, clutter):
+            if fnmatch_all(match_paths, bytes_clutter):
                 # Directory contains only clutter (or nothing).
-                shutil.rmtree(directory)
+                shutil.rmtree(str_directory)
             else:
                 break
         except OSError:
             break
 
 
-def components(path: AnyStr) -> MutableSequence[AnyStr]:
+def components(path: AnyStr) -> list[AnyStr]:
     """Return a list of the path components in path. For instance:
 
     >>> components(b'/a/b/c')
@@ -387,7 +389,7 @@ def _fsencoding() -> str:
     return encoding
 
 
-def bytestring_path(path: Bytes_or_String) -> bytes:
+def bytestring_path(path: PathLike) -> bytes:
     """Given a path, which is either a bytes or a unicode, returns a str
     path (ensuring that we never deal with Unicode pathnames). Path should be
     bytes but has safeguards for strings to be converted.
@@ -396,25 +398,28 @@ def bytestring_path(path: Bytes_or_String) -> bytes:
     if isinstance(path, bytes):
         return path
 
+    str_path = str(path)
+
     # On Windows, remove the magic prefix added by `syspath`. This makes
     # ``bytestring_path(syspath(X)) == X``, i.e., we can safely
     # round-trip through `syspath`.
-    if os.path.__name__ == "ntpath" and path.startswith(WINDOWS_MAGIC_PREFIX):
-        path = path[len(WINDOWS_MAGIC_PREFIX) :]
+    if os.path.__name__ == "ntpath" and str_path.startswith(
+        WINDOWS_MAGIC_PREFIX
+    ):
+        str_path = str_path[len(WINDOWS_MAGIC_PREFIX) :]
 
     # Try to encode with default encodings, but fall back to utf-8.
     try:
-        return path.encode(_fsencoding())
+        return str_path.encode(_fsencoding())
     except (UnicodeError, LookupError):
-        return path.encode("utf-8")
+        return str_path.encode("utf-8")
 
 
 PATH_SEP: bytes = bytestring_path(os.sep)
 
 
 def displayable_path(
-    path: Union[bytes, str, Tuple[Union[bytes, str], ...]],
-    separator: str = "; ",
+    path: BytesOrStr | tuple[BytesOrStr, ...], separator: str = "; "
 ) -> str:
     """Attempts to decode a bytestring path to a unicode object for the
     purpose of displaying it to the user. If the `path` argument is a
@@ -434,59 +439,52 @@ def displayable_path(
         return path.decode("utf-8", "ignore")
 
 
-def syspath(path: Bytes_or_String, prefix: bool = True) -> Bytes_or_String:
+def syspath(path: PathLike, prefix: bool = True) -> str:
     """Convert a path for use by the operating system. In particular,
     paths on Windows must receive a magic prefix and must be converted
     to Unicode before they are sent to the OS. To disable the magic
     prefix on Windows, set `prefix` to False---but only do this if you
     *really* know what you're doing.
     """
+    str_path = os.fsdecode(path)
     # Don't do anything if we're not on windows
     if os.path.__name__ != "ntpath":
-        return path
-
-    if not isinstance(path, str):
-        # Beets currently represents Windows paths internally with UTF-8
-        # arbitrarily. But earlier versions used MBCS because it is
-        # reported as the FS encoding by Windows. Try both.
-        try:
-            path = path.decode("utf-8")
-        except UnicodeError:
-            # The encoding should always be MBCS, Windows' broken
-            # Unicode representation.
-            assert isinstance(path, bytes)
-            encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
-            path = path.decode(encoding, "replace")
+        return str_path
 
     # Add the magic prefix if it isn't already there.
     # https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
-    if prefix and not path.startswith(WINDOWS_MAGIC_PREFIX):
-        if path.startswith("\\\\"):
+    if prefix and not str_path.startswith(WINDOWS_MAGIC_PREFIX):
+        if str_path.startswith("\\\\"):
             # UNC path. Final path should look like \\?\UNC\...
-            path = "UNC" + path[1:]
-        path = WINDOWS_MAGIC_PREFIX + path
+            str_path = "UNC" + str_path[1:]
+        str_path = WINDOWS_MAGIC_PREFIX + str_path
 
-    return path
+    return str_path
 
 
 def samefile(p1: bytes, p2: bytes) -> bool:
     """Safer equality for paths."""
     if p1 == p2:
         return True
-    return shutil._samefile(syspath(p1), syspath(p2))
+    with suppress(OSError):
+        return os.path.samefile(syspath(p1), syspath(p2))
+
+    return False
 
 
-def remove(path: Optional[bytes], soft: bool = True):
+def remove(path: bytes, soft: bool = True):
     """Remove the file. If `soft`, then no error will be raised if the
     file does not exist.
     """
-    path = syspath(path)
-    if not path or (soft and not os.path.exists(path)):
+    str_path = syspath(path)
+    if not str_path or (soft and not os.path.exists(str_path)):
         return
     try:
-        os.remove(path)
+        os.remove(str_path)
     except OSError as exc:
-        raise FilesystemError(exc, "delete", (path,), traceback.format_exc())
+        raise FilesystemError(
+            exc, "delete", (str_path,), traceback.format_exc()
+        )
 
 
 def copy(path: bytes, dest: bytes, replace: bool = False):
@ -497,23 +495,22 @@ def copy(path: bytes, dest: bytes, replace: bool = False):
|
|||
"""
|
||||
if samefile(path, dest):
|
||||
return
|
||||
path = syspath(path)
|
||||
dest = syspath(dest)
|
||||
if not replace and os.path.exists(dest):
|
||||
raise FilesystemError("file exists", "copy", (path, dest))
|
||||
str_path = syspath(path)
|
||||
str_dest = syspath(dest)
|
||||
if not replace and os.path.exists(str_dest):
|
||||
raise FilesystemError("file exists", "copy", (str_path, str_dest))
|
||||
try:
|
||||
shutil.copyfile(path, dest)
|
||||
shutil.copyfile(str_path, str_dest)
|
||||
except OSError as exc:
|
||||
raise FilesystemError(exc, "copy", (path, dest), traceback.format_exc())
|
||||
raise FilesystemError(
|
||||
exc, "copy", (str_path, str_dest), traceback.format_exc()
|
||||
)
|
||||
|
||||
|
||||
def move(path: bytes, dest: bytes, replace: bool = False):
|
||||
"""Rename a file. `dest` may not be a directory. If `dest` already
|
||||
exists, raises an OSError unless `replace` is True. Has no effect if
|
||||
`path` is the same as `dest`. If the paths are on different
|
||||
filesystems (or the rename otherwise fails), a copy is attempted
|
||||
instead, in which case metadata will *not* be preserved. Paths are
|
||||
translated to system paths.
|
||||
`path` is the same as `dest`. Paths are translated to system paths.
|
||||
"""
|
||||
if os.path.isdir(syspath(path)):
|
||||
raise FilesystemError("source is directory", "move", (path, dest))
|
||||
|
|
@ -539,22 +536,36 @@ def move(path: bytes, dest: bytes, replace: bool = False):
|
|||
)
|
||||
try:
|
||||
with open(syspath(path), "rb") as f:
|
||||
shutil.copyfileobj(f, tmp)
|
||||
# mypy bug:
|
||||
# - https://github.com/python/mypy/issues/15031
|
||||
# - https://github.com/python/mypy/issues/14943
|
||||
# Fix not yet released:
|
||||
# - https://github.com/python/mypy/pull/14975
|
||||
shutil.copyfileobj(f, tmp) # type: ignore[misc]
|
||||
finally:
|
||||
tmp.close()
|
||||
|
||||
# Move the copied file into place.
|
||||
try:
|
||||
os.replace(tmp.name, syspath(dest))
|
||||
tmp = None
|
||||
# Copy file metadata
|
||||
shutil.copystat(syspath(path), tmp.name)
|
||||
except OSError:
|
||||
# Ignore errors because it doesn't matter too much. We may be on a
|
||||
# filesystem that doesn't support this.
|
||||
pass
|
||||
|
||||
# Move the copied file into place.
|
||||
tmp_filename = tmp.name
|
||||
try:
|
||||
os.replace(tmp_filename, syspath(dest))
|
||||
tmp_filename = ""
|
||||
os.remove(syspath(path))
|
||||
except OSError as exc:
|
||||
raise FilesystemError(
|
||||
exc, "move", (path, dest), traceback.format_exc()
|
||||
)
|
||||
finally:
|
||||
if tmp is not None:
|
||||
os.remove(tmp)
|
||||
if tmp_filename:
|
||||
os.remove(tmp_filename)
|
||||
|
||||
|
||||
def link(path: bytes, dest: bytes, replace: bool = False):
|
||||
|
|
@ -622,31 +633,33 @@ def reflink(
|
|||
Raise an `OSError` if `dest` already exists, unless `replace` is
|
||||
True. If `path` == `dest`, then do nothing.
|
||||
|
||||
If reflinking fails and `fallback` is enabled, try copying the file
|
||||
instead. Otherwise, raise an error without trying a plain copy.
|
||||
|
||||
May raise an `ImportError` if the `reflink` module is not available.
|
||||
If `fallback` is enabled, ignore errors and copy the file instead.
|
||||
Otherwise, errors are re-raised as FilesystemError with an explanation.
|
||||
"""
|
||||
import reflink as pyreflink
|
||||
|
||||
if samefile(path, dest):
|
||||
return
|
||||
|
||||
if os.path.exists(syspath(dest)) and not replace:
|
||||
raise FilesystemError("file exists", "rename", (path, dest))
|
||||
raise FilesystemError("target exists", "rename", (path, dest))
|
||||
|
||||
if fallback:
|
||||
with suppress(Exception):
|
||||
return import_module("reflink").reflink(path, dest)
|
||||
return copy(path, dest, replace)
|
||||
|
||||
try:
|
||||
pyreflink.reflink(path, dest)
|
||||
except (NotImplementedError, pyreflink.ReflinkImpossibleError):
|
||||
if fallback:
|
||||
copy(path, dest, replace)
|
||||
else:
|
||||
raise FilesystemError(
|
||||
"OS/filesystem does not support reflinks.",
|
||||
"link",
|
||||
(path, dest),
|
||||
traceback.format_exc(),
|
||||
)
|
||||
import_module("reflink").reflink(path, dest)
|
||||
except (ImportError, OSError):
|
||||
raise
|
||||
except Exception as exc:
|
||||
msg = {
|
||||
"EXDEV": "Cannot reflink across devices",
|
||||
"EOPNOTSUPP": "Device does not support reflinks",
|
||||
}.get(str(exc), "OS does not support reflinks")
|
||||
|
||||
raise FilesystemError(
|
||||
msg, "reflink", (path, dest), traceback.format_exc()
|
||||
) from exc
|
||||
|
||||
|
||||
def unique_path(path: bytes) -> bytes:
|
||||
|
|
@ -676,7 +689,7 @@ def unique_path(path: bytes) -> bytes:
|
|||
# Unix. They are forbidden here because they cause problems on Samba
|
||||
# shares, which are sufficiently common as to cause frequent problems.
|
||||
# https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247.aspx
|
||||
CHAR_REPLACE: List[Tuple[Pattern, str]] = [
|
||||
CHAR_REPLACE = [
|
||||
(re.compile(r"[\\/]"), "_"), # / and \ -- forbidden everywhere.
|
||||
(re.compile(r"^\."), "_"), # Leading dot (hidden files on Unix).
|
||||
(re.compile(r"[\x00-\x1f]"), ""), # Control characters.
|
||||
|
|
@ -686,10 +699,7 @@ CHAR_REPLACE: List[Tuple[Pattern, str]] = [
|
|||
]
|
||||
|
||||
|
||||
def sanitize_path(
|
||||
path: str,
|
||||
replacements: Optional[Sequence[Sequence[Union[Pattern, str]]]] = None,
|
||||
) -> str:
|
||||
def sanitize_path(path: str, replacements: Replacements | None = None) -> str:
|
||||
"""Takes a path (as a Unicode string) and makes sure that it is
|
||||
legal. Returns a new path. Only works with fragments; won't work
|
||||
reliably on Windows when a path begins with a drive letter. Path
|
||||
|
|
@ -729,11 +739,11 @@ def truncate_path(path: AnyStr, length: int = MAX_FILENAME_LENGTH) -> AnyStr:
|
|||
|
||||
def _legalize_stage(
|
||||
path: str,
|
||||
replacements: Optional[Sequence[Sequence[Union[Pattern, str]]]],
|
||||
replacements: Replacements | None,
|
||||
length: int,
|
||||
extension: str,
|
||||
fragment: bool,
|
||||
) -> Tuple[Bytes_or_String, bool]:
|
||||
) -> tuple[BytesOrStr, bool]:
|
||||
"""Perform a single round of path legalization steps
|
||||
(sanitation/replacement, encoding from Unicode to bytes,
|
||||
extension-appending, and truncation). Return the path (Unicode if
|
||||
|
|
@ -759,11 +769,11 @@ def _legalize_stage(
|
|||
|
||||
def legalize_path(
|
||||
path: str,
|
||||
replacements: Optional[Sequence[Sequence[Union[Pattern, str]]]],
|
||||
replacements: Replacements | None,
|
||||
length: int,
|
||||
extension: bytes,
|
||||
fragment: bool,
|
||||
) -> Tuple[Union[Bytes_or_String, bool]]:
|
||||
) -> tuple[BytesOrStr, bool]:
|
||||
"""Given a path-like Unicode string, produce a legal path. Return
|
||||
the path and a flag indicating whether some replacements had to be
|
||||
ignored (see below).
|
||||
|
|
@ -830,7 +840,7 @@ def as_string(value: Any) -> str:
|
|||
return str(value)
|
||||
|
||||
|
||||
def plurality(objs: Sequence[T]) -> T:
|
||||
def plurality(objs: Sequence[T]) -> tuple[T, int]:
|
||||
"""Given a sequence of hashble objects, returns the object that
|
||||
is most common in the set and the its number of appearance. The
|
||||
sequence must contain at least one object.
|
||||
|
|
@ -841,7 +851,7 @@ def plurality(objs: Sequence[T]) -> T:
|
|||
return c.most_common(1)[0]
|
||||
|
||||
|
||||
def convert_command_args(args: List[bytes]) -> List[str]:
|
||||
def convert_command_args(args: list[BytesOrStr]) -> list[str]:
|
||||
"""Convert command arguments, which may either be `bytes` or `str`
|
||||
objects, to uniformly surrogate-escaped strings."""
|
||||
assert isinstance(args, list)
|
||||
|
|
@ -855,13 +865,12 @@ def convert_command_args(args: List[bytes]) -> List[str]:
|
|||
|
||||
|
||||
# stdout and stderr as bytes
|
||||
CommandOutput = namedtuple("CommandOutput", ("stdout", "stderr"))
|
||||
class CommandOutput(NamedTuple):
|
||||
stdout: bytes
|
||||
stderr: bytes
|
||||
|
||||
|
||||
def command_output(
|
||||
cmd: List[Bytes_or_String],
|
||||
shell: bool = False,
|
||||
) -> CommandOutput:
|
||||
def command_output(cmd: list[BytesOrStr], shell: bool = False) -> CommandOutput:
|
||||
"""Runs the command and returns its output after it has exited.
|
||||
|
||||
Returns a CommandOutput. The attributes ``stdout`` and ``stderr`` contain
|
||||
|
|
@ -879,7 +888,7 @@ def command_output(
|
|||
This replaces `subprocess.check_output` which can have problems if lots of
|
||||
output is sent to stderr.
|
||||
"""
|
||||
cmd = convert_command_args(cmd)
|
||||
converted_cmd = convert_command_args(cmd)
|
||||
|
||||
devnull = subprocess.DEVNULL
|
||||
|
||||
|
|
@ -895,13 +904,13 @@ def command_output(
|
|||
if proc.returncode:
|
||||
raise subprocess.CalledProcessError(
|
||||
returncode=proc.returncode,
|
||||
cmd=" ".join(map(str, cmd)),
|
||||
cmd=" ".join(converted_cmd),
|
||||
output=stdout + stderr,
|
||||
)
|
||||
return CommandOutput(stdout, stderr)
|
||||
|
||||
|
||||
def max_filename_length(path: AnyStr, limit=MAX_FILENAME_LENGTH) -> int:
|
||||
def max_filename_length(path: BytesOrStr, limit=MAX_FILENAME_LENGTH) -> int:
|
||||
"""Attempt to determine the maximum filename length for the
|
||||
filesystem containing `path`. If the value is greater than `limit`,
|
||||
then `limit` is used instead (to prevent errors when a filesystem
|
||||
|
|
@ -1041,7 +1050,7 @@ def asciify_path(path: str, sep_replace: str) -> str:
|
|||
# if this platform has an os.altsep, change it to os.sep.
|
||||
if os.altsep:
|
||||
path = path.replace(os.altsep, os.sep)
|
||||
path_components: List[Bytes_or_String] = path.split(os.sep)
|
||||
path_components: list[str] = path.split(os.sep)
|
||||
for index, item in enumerate(path_components):
|
||||
path_components[index] = unidecode(item).replace(os.sep, sep_replace)
|
||||
if os.altsep:
|
||||
|
|
@ -1051,7 +1060,7 @@ def asciify_path(path: str, sep_replace: str) -> str:
|
|||
return os.sep.join(path_components)
|
||||
|
||||
|
||||
def par_map(transform: Callable, items: Iterable):
|
||||
def par_map(transform: Callable[[T], Any], items: Sequence[T]) -> None:
|
||||
"""Apply the function `transform` to all the elements in the
|
||||
iterable `items`, like `map(transform, items)` but with no return
|
||||
value.
|
||||
|
|
@ -1065,7 +1074,7 @@ def par_map(transform: Callable, items: Iterable):
|
|||
pool.join()
|
||||
|
||||
|
||||
class cached_classproperty: # noqa: N801
|
||||
class cached_classproperty:
|
||||
"""A decorator implementing a read-only property that is *lazy* in
|
||||
the sense that the getter is only invoked once. Subsequent accesses
|
||||
through *any* instance use the cached result.
|
||||
|
|
@ -1123,3 +1132,8 @@ def get_temp_filename(
|
|||
|
||||
_, filename = tempfile.mkstemp(dir=tempdir, prefix=prefix, suffix=suffix)
|
||||
return bytestring_path(filename)
|
||||
|
||||
|
||||
def unique_list(elements: Iterable[T]) -> list[T]:
|
||||
"""Return a list with unique elements in the original order."""
|
||||
return list(dict.fromkeys(elements))
|
||||
|
|
|
|||
|
|
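The path-helper changes above funnel every OS call through `syspath`, which now always returns `str` via `os.fsdecode`, while `bytestring_path` undoes the Windows magic prefix so the two functions round-trip. A minimal, self-contained sketch of that contract, assuming only the documented long-path prefix (everything else here is illustrative, not code from the beets tree):

import os

WINDOWS_MAGIC_PREFIX = "\\\\?\\"


def add_prefix(str_path: str) -> str:
    # Mirrors the syspath() branch: prefix plain paths, and turn UNC paths
    # (\\server\share) into \\?\UNC\server\share.
    if not str_path.startswith(WINDOWS_MAGIC_PREFIX):
        if str_path.startswith("\\\\"):
            str_path = "UNC" + str_path[1:]
        str_path = WINDOWS_MAGIC_PREFIX + str_path
    return str_path


def strip_prefix(str_path: str) -> str:
    # Mirrors the bytestring_path() branch that undoes add_prefix().
    if str_path.startswith(WINDOWS_MAGIC_PREFIX):
        return str_path[len(WINDOWS_MAGIC_PREFIX):]
    return str_path


# The round-trip holds for ordinary drive paths:
assert strip_prefix(add_prefix(r"C:\Music\a.flac")) == r"C:\Music\a.flac"
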
@@ -203,7 +203,7 @@ def _event_select(events):
    return ready_events


class ThreadException(Exception):
class ThreadError(Exception):
    def __init__(self, coro, exc_info):
        self.coro = coro
        self.exc_info = exc_info

@@ -266,7 +266,7 @@ def run(root_coro):
    """After an event is fired, run a given coroutine associated with
    it in the threads dict until it yields again. If the coroutine
    exits, then the thread is removed from the pool. If the coroutine
    raises an exception, it is reraised in a ThreadException. If
    raises an exception, it is reraised in a ThreadError. If
    is_exc is True, then the value must be an exc_info tuple and the
    exception is thrown into the coroutine.
    """

@@ -281,7 +281,7 @@ def run(root_coro):
    except BaseException:
        # Thread raised some other exception.
        del threads[coro]
        raise ThreadException(coro, sys.exc_info())
        raise ThreadError(coro, sys.exc_info())
    else:
        if isinstance(next_event, types.GeneratorType):
            # Automatically invoke sub-coroutines. (Shorthand for

@@ -369,7 +369,7 @@ def run(root_coro):
            else:
                advance_thread(event2coro[event], value)

    except ThreadException as te:
    except ThreadError as te:
        # Exception raised from inside a thread.
        event = ExceptionEvent(te.exc_info)
        if te.coro in delegators:

@@ -1,35 +0,0 @@
# This file is part of beets.
# Copyright 2016-2019, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.


import warnings

import confuse

warnings.warn(
    "beets.util.confit is deprecated; use confuse instead",
    # Show the location of the `import confit` statement as the warning's
    # source, rather than this file, such that the offending module can be
    # identified easily.
    stacklevel=2,
)

# Import everything from the confuse module into this module.
for key, value in confuse.__dict__.items():
    if key not in ["__name__"]:
        globals()[key] = value


# Cleanup namespace.
del key, value, warnings, confuse

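The deleted shim above forwarded every confuse name through this module, warning once at import time. For reference, a hedged editor's sketch of the same idea using PEP 562 module-level `__getattr__`, which fires the warning per attribute access instead; this is an illustration of the pattern, not the replacement beets adopted (beets simply dropped the module):

import warnings

import confuse


def __getattr__(name):
    # Forward any confuse attribute, warning at the caller's location.
    if hasattr(confuse, name):
        warnings.warn(
            "beets.util.confit is deprecated; use confuse instead",
            DeprecationWarning,
            stacklevel=2,
        )
        return getattr(confuse, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
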
@@ -1,42 +0,0 @@
# This file is part of beets.
# Copyright 2016, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.


from enum import Enum


class OrderedEnum(Enum):
    """
    An Enum subclass that allows comparison of members.
    """

    def __ge__(self, other):
        if self.__class__ is other.__class__:
            return self.value >= other.value
        return NotImplemented

    def __gt__(self, other):
        if self.__class__ is other.__class__:
            return self.value > other.value
        return NotImplemented

    def __le__(self, other):
        if self.__class__ is other.__class__:
            return self.value <= other.value
        return NotImplemented

    def __lt__(self, other):
        if self.__class__ is other.__class__:
            return self.value < other.value
        return NotImplemented

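The deleted `OrderedEnum` hand-wrote all four rich comparisons. An editor's sketch of an equivalent using `functools.total_ordering`, which derives the other operators from `__lt__` plus Enum's default `__eq__` (an illustration of the idiom, not what beets switched to):

from enum import Enum
from functools import total_ordering


@total_ordering
class OrderedEnum(Enum):
    def __lt__(self, other):
        if self.__class__ is other.__class__:
            return self.value < other.value
        return NotImplemented


class Level(OrderedEnum):
    LOW = 1
    HIGH = 2


assert Level.LOW < Level.HIGH
assert Level.HIGH >= Level.LOW  # derived by total_ordering
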
@@ -26,7 +26,6 @@ This is sort of like a tiny, horrible degeneration of a real templating
engine like Jinja2 or Mustache.
"""

import ast
import dis
import functools

@@ -31,7 +31,6 @@ To do so, pass an iterable of coroutines to the Pipeline constructor
in place of any single coroutine.
"""

import queue
import sys
from threading import Lock, Thread

@@ -16,11 +16,14 @@
libraries.
"""

from collections import namedtuple
from typing import Any, NamedTuple

from beets import util

Node = namedtuple("Node", ["files", "dirs"])

class Node(NamedTuple):
    files: dict[str, Any]
    dirs: dict[str, Any]


def _insert(node, path, itemid):

@@ -14,7 +14,6 @@

"""A namespace package for beets plugins."""

# Make this a namespace package.
from pkgutil import extend_path

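The `Node` change above swaps an untyped `namedtuple` for a `typing.NamedTuple` so that `files` and `dirs` carry annotations type checkers can verify. A small usage sketch; the leaf layout (filenames mapping to item ids) is assumed from the surrounding virtual-filesystem code:

from typing import Any, NamedTuple


class Node(NamedTuple):
    files: dict[str, Any]
    dirs: dict[str, Any]


root = Node(files={}, dirs={})
root.dirs["Albums"] = Node(files={"track.flac": 42}, dirs={})
assert root.dirs["Albums"].files["track.flac"] == 42
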
@@ -12,9 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Calculate acoustic information and submit to AcousticBrainz.
"""

"""Calculate acoustic information and submit to AcousticBrainz."""

import errno
import hashlib

@@ -187,9 +185,9 @@ only files which would be processed",
            with open(filename) as tmp_file:
                analysis = json.load(tmp_file)
            # Add the hash to the output.
            analysis["metadata"]["version"][
                "essentia_build_sha"
            ] = self.extractor_sha
            analysis["metadata"]["version"]["essentia_build_sha"] = (
                self.extractor_sha
            )
            return analysis
        finally:
            try:

@@ -12,8 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Fetch various AcousticBrainz metadata using MBID.
"""
"""Fetch various AcousticBrainz metadata using MBID."""

from collections import defaultdict

@@ -14,7 +14,6 @@

"""Adds an album template field for formatted album types."""

from beets.autotag.mb import VARIOUS_ARTISTS_ID
from beets.library import Album
from beets.plugins import BeetsPlugin

@@ -14,13 +14,13 @@

"""An AURA server using Flask."""

import os
import re
import sys
from collections.abc import Mapping
from dataclasses import dataclass
from mimetypes import guess_type
from typing import ClassVar, Mapping, Type
from typing import ClassVar

from flask import (
    Blueprint,

@@ -128,7 +128,7 @@ ARTIST_ATTR_MAP = {
class AURADocument:
    """Base class for building AURA documents."""

    model_cls: ClassVar[Type[LibModel]]
    model_cls: ClassVar[type[LibModel]]

    lib: Library
    args: Mapping[str, str]

@@ -154,7 +154,7 @@ class AURADocument:
        return make_response(document, status)

    @classmethod
    def get_attribute_converter(cls, beets_attr: str) -> Type[SQLiteType]:
    def get_attribute_converter(cls, beets_attr: str) -> type[SQLiteType]:
        """Work out what data type an attribute should be for beets.

        Args:

@@ -375,7 +375,7 @@ class TrackDocument(AURADocument):
        return self.lib.items(query, sort)

    @classmethod
    def get_attribute_converter(cls, beets_attr: str) -> Type[SQLiteType]:
    def get_attribute_converter(cls, beets_attr: str) -> type[SQLiteType]:
        """Work out what data type an attribute should be for beets.

        Args:

@@ -11,81 +11,74 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Uses Librosa to calculate the `bpm` field.
"""
"""Uses Librosa to calculate the `bpm` field."""

from __future__ import annotations

from librosa import beat, load
from soundfile import LibsndfileError
from collections.abc import Iterable
from typing import TYPE_CHECKING

import librosa

from beets import ui, util
from beets.plugins import BeetsPlugin
from beets.ui import Subcommand, should_write

if TYPE_CHECKING:
    from beets.importer import ImportTask
    from beets.library import Item, Library


class AutoBPMPlugin(BeetsPlugin):
    def __init__(self):
    def __init__(self) -> None:
        super().__init__()
        self.config.add(
            {
                "auto": True,
                "overwrite": False,
                "beat_track_kwargs": {},
            }
        )

        if self.config["auto"].get(bool):
        if self.config["auto"]:
            self.import_stages = [self.imported]

    def commands(self):
        cmd = ui.Subcommand(
    def commands(self) -> list[Subcommand]:
        cmd = Subcommand(
            "autobpm", help="detect and add bpm from audio using Librosa"
        )
        cmd.func = self.command
        return [cmd]

    def command(self, lib, opts, args):
        self.calculate_bpm(lib.items(ui.decargs(args)), write=ui.should_write())
    def command(self, lib: Library, _, args: list[str]) -> None:
        self.calculate_bpm(list(lib.items(args)), write=should_write())

    def imported(self, session, task):
    def imported(self, _, task: ImportTask) -> None:
        self.calculate_bpm(task.imported_items())

    def calculate_bpm(self, items, write=False):
        overwrite = self.config["overwrite"].get(bool)

    def calculate_bpm(self, items: list[Item], write: bool = False) -> None:
        for item in items:
            if item["bpm"]:
                self._log.info(
                    "found bpm {0} for {1}",
                    item["bpm"],
                    util.displayable_path(item.path),
                )
                if not overwrite:
            path = item.filepath
            if bpm := item.bpm:
                self._log.info("BPM for {} already exists: {}", path, bpm)
                if not self.config["overwrite"]:
                    continue

            try:
                y, sr = load(util.syspath(item.path), res_type="kaiser_fast")
            except LibsndfileError as exc:
                self._log.error(
                    "LibsndfileError: failed to load {0} {1}",
                    util.displayable_path(item.path),
                    exc,
                )
                continue
            except ValueError as exc:
                self._log.error(
                    "ValueError: failed to load {0} {1}",
                    util.displayable_path(item.path),
                    exc,
                )
                y, sr = librosa.load(item.filepath, res_type="kaiser_fast")
            except Exception as exc:
                self._log.error("Failed to load {}: {}", path, exc)
                continue

            tempo, _ = beat.beat_track(y=y, sr=sr)
            bpm = round(tempo)
            kwargs = self.config["beat_track_kwargs"].flatten()
            try:
                tempo, _ = librosa.beat.beat_track(y=y, sr=sr, **kwargs)
            except Exception as exc:
                self._log.error("Failed to measure BPM for {}: {}", path, exc)
                continue

            bpm = round(tempo[0] if isinstance(tempo, Iterable) else tempo)
            item["bpm"] = bpm
            self._log.info(
                "added computed bpm {0} for {1}",
                bpm,
                util.displayable_path(item.path),
            )
            self._log.info("Computed BPM for {}: {}", path, bpm)

            if write:
                item.try_write()

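Why the new autobpm code checks `isinstance(tempo, Iterable)`: depending on the librosa version, `beat_track()` may return the tempo as a plain float or wrapped in a one-element array, so the plugin normalizes both shapes before rounding. A pure-Python sketch of that normalization (the list stands in for a one-element ndarray):

from collections.abc import Iterable


def normalize_tempo(tempo) -> int:
    # Unwrap a one-element sequence, then round to a whole BPM.
    return round(tempo[0] if isinstance(tempo, Iterable) else tempo)


assert normalize_tempo(121.7) == 122
assert normalize_tempo([121.7]) == 122
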
@@ -12,9 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Use command-line tools to check for audio file corruption.
"""

"""Use command-line tools to check for audio file corruption."""

import errno
import os

@@ -30,7 +28,7 @@ from beets.ui import Subcommand
from beets.util import displayable_path, par_map


class CheckerCommandException(Exception):
class CheckerCommandError(Exception):
    """Raised when running a checker failed.

    Attributes:

@@ -70,7 +68,7 @@ class BadFiles(BeetsPlugin):
            errors = 1
            status = e.returncode
        except OSError as e:
            raise CheckerCommandException(cmd, e)
            raise CheckerCommandError(cmd, e)
        output = output.decode(sys.getdefaultencoding(), "replace")
        return status, errors, [line for line in output.split("\n") if line]

@@ -128,7 +126,7 @@ class BadFiles(BeetsPlugin):
        path = item.path.decode(sys.getfilesystemencoding())
        try:
            status, errors, output = checker(path)
        except CheckerCommandException as e:
        except CheckerCommandError as e:
            if e.errno == errno.ENOENT:
                self._log.error(
                    "command not found: {} when validating file: {}",

@@ -200,7 +198,7 @@ class BadFiles(BeetsPlugin):
        elif sel == "c":
            return None
        elif sel == "b":
            raise importer.ImportAbort()
            raise importer.ImportAbortError()
        else:
            raise Exception(f"Unexpected selection: {sel}")

@@ -18,7 +18,6 @@

"""Provides a bare-ASCII matching query."""

from unidecode import unidecode

from beets import ui

@@ -12,8 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Adds Beatport release and track search support to the autotagger
"""
"""Adds Beatport release and track search support to the autotagger"""

import json
import re

@@ -12,9 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Some simple performance benchmarks for beets.
"""

"""Some simple performance benchmarks for beets."""

import cProfile
import timeit

@@ -17,7 +17,6 @@ Beets library. Attempts to implement a compatible protocol to allow
use of the wide range of MPD clients.
"""

import inspect
import math
import random

@@ -27,7 +26,6 @@ import sys
import time
import traceback
from string import Template
from typing import List

from mediafile import MediaFile

@@ -168,13 +166,13 @@ def cast_arg(t, val):
    raise ArgumentTypeError()


class BPDClose(Exception):
class BPDCloseError(Exception):
    """Raised by a command invocation to indicate that the connection
    should be closed.
    """


class BPDIdle(Exception):
class BPDIdleError(Exception):
    """Raised by a command to indicate the client wants to enter the idle state
    and should be notified when a relevant event happens.
    """

@@ -349,7 +347,7 @@ class BaseServer:
        for system in subsystems:
            if system not in SUBSYSTEMS:
                raise BPDError(ERROR_ARG, f"Unrecognised idle event: {system}")
        raise BPDIdle(subsystems)  # put the connection into idle mode
        raise BPDIdleError(subsystems)  # put the connection into idle mode

    def cmd_kill(self, conn):
        """Exits the server process."""

@@ -357,7 +355,7 @@ class BaseServer:

    def cmd_close(self, conn):
        """Closes the connection."""
        raise BPDClose()
        raise BPDCloseError()

    def cmd_password(self, conn, password):
        """Attempts password authentication."""

@@ -739,13 +737,13 @@ class BaseServer:

    # Additions to the MPD protocol.

    def cmd_crash_TypeError(self, conn):  # noqa: N802
    def cmd_crash(self, conn):
        """Deliberately trigger a TypeError for testing purposes.
        We want to test that the server properly responds with ERROR_SYSTEM
        without crashing, and that this is not treated as ERROR_ARG (since it
        is caused by a programming error, not a protocol error).
        """
        "a" + 2
        raise TypeError


class Connection:

@@ -773,8 +771,8 @@ class Connection:
        if isinstance(lines, str):
            lines = [lines]
        out = NEWLINE.join(lines) + NEWLINE
        for l in out.split(NEWLINE)[:-1]:
            self.debug(l, kind=">")
        for line in out.split(NEWLINE)[:-1]:
            self.debug(line, kind=">")
        if isinstance(out, str):
            out = out.encode("utf-8")
        return self.sock.sendall(out)

@@ -853,8 +851,8 @@ class MPDConnection(Connection):
                self.disconnect()  # Client sent a blank line.
                break
            line = line.decode("utf8")  # MPD protocol uses UTF-8.
            for l in line.split(NEWLINE):
                self.debug(l, kind="<")
            for line in line.split(NEWLINE):
                self.debug(line, kind="<")

            if self.idle_subscriptions:
                # The connection is in idle mode.

@@ -888,12 +886,12 @@ class MPDConnection(Connection):
                # Ordinary command.
                try:
                    yield bluelet.call(self.do_command(Command(line)))
                except BPDClose:
                except BPDCloseError:
                    # Command indicates that the conn should close.
                    self.sock.close()
                    self.disconnect()  # Client explicitly closed.
                    return
                except BPDIdle as e:
                except BPDIdleError as e:
                    self.idle_subscriptions = e.subsystems
                    self.debug(
                        "awaiting: {}".format(" ".join(e.subsystems)), kind="z"

@@ -922,8 +920,8 @@ class ControlConnection(Connection):
            if not line:
                break  # Client sent a blank line.
            line = line.decode("utf8")  # Protocol uses UTF-8.
            for l in line.split(NEWLINE):
                self.debug(l, kind="<")
            for line in line.split(NEWLINE):
                self.debug(line, kind="<")
            command = Command(line)
            try:
                func = command.delegate("ctrl_", self)

@@ -1046,12 +1044,12 @@ class Command:
                e.cmd_name = self.name
            raise e

        except BPDClose:
        except BPDCloseError:
            # An indication that the connection should close. Send
            # it on the Connection.
            raise

        except BPDIdle:
        except BPDIdleError:
            raise

        except Exception:

@@ -1060,7 +1058,7 @@ class Command:
            raise BPDError(ERROR_SYSTEM, "server error", self.name)


class CommandList(List[Command]):
class CommandList(list[Command]):
    """A list of commands issued by the client for processing by the
    server. May be verbose, in which case the response is delimited, or
    not. Should be a list of `Command` objects.

@@ -16,7 +16,6 @@
music player.
"""

import _thread
import copy
import os

@@ -14,7 +14,6 @@

"""Determine BPM by pressing a key to the rhythm."""

import time

from beets import ui

@@ -12,8 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Update library's tags using Beatport.
"""
"""Update library's tags using Beatport."""

from beets import autotag, library, ui, util
from beets.plugins import BeetsPlugin, apply_item_changes

@@ -12,9 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Provides the %bucket{} function for path formatting.
"""

"""Provides the %bucket{} function for path formatting."""

import re
import string

@@ -12,8 +12,8 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Converts tracks or albums to external directory
"""
"""Converts tracks or albums to external directory"""

import logging
import os
import shlex

@@ -85,18 +85,23 @@ def get_format(fmt=None):
    return (command.encode("utf-8"), extension.encode("utf-8"))


def in_no_convert(item: Item) -> bool:
    no_convert_query = config["convert"]["no_convert"].as_str()

    if no_convert_query:
        query, _ = parse_query_string(no_convert_query, Item)
        return query.match(item)
    else:
        return False


def should_transcode(item, fmt):
    """Determine whether the item should be transcoded as part of
    conversion (i.e., its bitrate is high or it has the wrong format).
    """
    no_convert_queries = config["convert"]["no_convert"].as_str_seq()
    if no_convert_queries:
        for query_string in no_convert_queries:
            query, _ = parse_query_string(query_string, Item)
            if query.match(item):
                return False
    if config["convert"]["never_convert_lossy_files"] and not (
        item.format.lower() in LOSSLESS_FORMATS
    if in_no_convert(item) or (
        config["convert"]["never_convert_lossy_files"]
        and item.format.lower() not in LOSSLESS_FORMATS
    ):
        return False
    maxbr = config["convert"]["max_bitrate"].get(Optional(int))

@@ -12,8 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Adds Deezer release and track search support to the autotagger
"""
"""Adds Deezer release and track search support to the autotagger"""

import collections
import time

@@ -112,8 +111,8 @@ class DeezerPlugin(MetadataSourcePlugin, BeetsPlugin):
            day = None
        else:
            raise ui.UserError(
                "Invalid `release_date` returned "
                "by {} API: '{}'".format(self.data_source, release_date)
                f"Invalid `release_date` returned by {self.data_source} API: "
                f"{release_date!r}"
            )
        tracks_obj = self.fetch_data(self.album_url + deezer_id + "/tracks")
        if tracks_obj is None:

@@ -279,12 +278,20 @@ class DeezerPlugin(MetadataSourcePlugin, BeetsPlugin):
        if not query:
            return None
        self._log.debug(f"Searching {self.data_source} for '{query}'")
        response = requests.get(
            self.search_url + query_type,
            params={"q": query},
            timeout=10,
        )
        response.raise_for_status()
        try:
            response = requests.get(
                self.search_url + query_type,
                params={"q": query},
                timeout=10,
            )
            response.raise_for_status()
        except requests.exceptions.RequestException as e:
            self._log.error(
                "Error fetching data from {} API\n Error: {}",
                self.data_source,
                e,
            )
            return None
        response_data = response.json().get("data", [])
        self._log.debug(
            "Found {} result(s) from {} for '{}'",

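The Deezer hunk above stops letting network failures propagate: the GET and `raise_for_status()` move inside a `try` block and any `RequestException` is logged and converted to `None`. A generic, hedged sketch of that pattern (the function name and URL handling are illustrative, not beets API):

from __future__ import annotations

import requests


def fetch_json(url: str, params: dict | None = None):
    try:
        response = requests.get(url, params=params, timeout=10)
        response.raise_for_status()
    except requests.exceptions.RequestException:
        # Covers connection errors, timeouts, and HTTP 4xx/5xx alike.
        return None
    return response.json()
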
@@ -16,6 +16,8 @@
python3-discogs-client library.
"""

from __future__ import annotations

import http.client
import json
import os

@@ -30,6 +32,7 @@ from discogs_client import Client, Master, Release
from discogs_client import __version__ as dc_string
from discogs_client.exceptions import DiscogsAPIError
from requests.exceptions import ConnectionError
from typing_extensions import TypedDict

import beets
import beets.ui

@@ -52,6 +55,12 @@ CONNECTION_ERRORS = (
)


class ReleaseFormat(TypedDict):
    name: str
    qty: int
    descriptions: list[str] | None


class DiscogsPlugin(BeetsPlugin):
    def __init__(self):
        super().__init__()

@@ -363,6 +372,18 @@ class DiscogsPlugin(BeetsPlugin):
            )
            return None

    @staticmethod
    def get_media_and_albumtype(
        formats: list[ReleaseFormat] | None,
    ) -> tuple[str | None, str | None]:
        media = albumtype = None
        if formats and (first_format := formats[0]):
            if descriptions := first_format["descriptions"]:
                albumtype = ", ".join(descriptions)
            media = first_format["name"]

        return media, albumtype

    def get_album_info(self, result):
        """Returns an AlbumInfo object for a discogs Release object."""
        # Explicitly reload the `Release` fields, as they might not be yet

@@ -413,13 +434,11 @@ class DiscogsPlugin(BeetsPlugin):

        # Extract information for the optional AlbumInfo fields that are
        # contained on nested discogs fields.
        albumtype = media = label = catalogno = labelid = None
        if result.data.get("formats"):
            albumtype = (
                ", ".join(result.data["formats"][0].get("descriptions", []))
                or None
            )
            media = result.data["formats"][0]["name"]
        media, albumtype = self.get_media_and_albumtype(
            result.data.get("formats")
        )

        label = catalogno = labelid = None
        if result.data.get("labels"):
            label = result.data["labels"][0].get("name")
            catalogno = result.data["labels"][0].get("catno")

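A quick check of the extracted helper's behavior on the shapes the `ReleaseFormat` TypedDict above allows; the sample release data is invented for illustration:

def get_media_and_albumtype(formats):
    # Same body as the static method in the hunk above, standalone.
    media = albumtype = None
    if formats and (first_format := formats[0]):
        if descriptions := first_format["descriptions"]:
            albumtype = ", ".join(descriptions)
        media = first_format["name"]
    return media, albumtype


assert get_media_and_albumtype(None) == (None, None)
assert get_media_and_albumtype(
    [{"name": "Vinyl", "qty": 2, "descriptions": ["LP, Album"]}]
) == ("Vinyl", "LP, Album")
assert get_media_and_albumtype(
    [{"name": "CD", "qty": 1, "descriptions": None}]
) == ("CD", None)
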
@@ -12,8 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""List duplicate tracks or albums.
"""
"""List duplicate tracks or albums."""

import os
import shlex

@@ -304,7 +303,9 @@ class DuplicatesPlugin(BeetsPlugin):
        kind = "items" if all(isinstance(o, Item) for o in objs) else "albums"

        if tiebreak and kind in tiebreak.keys():
            key = lambda x: tuple(getattr(x, k) for k in tiebreak[kind])

            def key(x):
                return tuple(getattr(x, k) for k in tiebreak[kind])
        else:
            if kind == "items":

@@ -317,9 +318,13 @@ class DuplicatesPlugin(BeetsPlugin):
                )

                fields = Item.all_keys()
                key = lambda x: sum(1 for f in fields if truthy(getattr(x, f)))

                def key(x):
                    return sum(1 for f in fields if truthy(getattr(x, f)))
            else:
                key = lambda x: len(x.items())

                def key(x):
                    return len(x.items())

        return sorted(objs, key=key, reverse=True)

@@ -12,8 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Open metadata information in a text editor to let the user edit it.
"""
"""Open metadata information in a text editor to let the user edit it."""

import codecs
import os

@@ -1,11 +1,11 @@
"""Updates the Emby Library whenever the beets library is changed.

emby:
    host: localhost
    port: 8096
    username: user
    apikey: apikey
    password: password
    emby:
        host: localhost
        port: 8096
        username: user
        apikey: apikey
        password: password
"""

import hashlib

@@ -11,9 +11,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Exports data from beets
"""

"""Exports data from beets"""

import codecs
import csv

@@ -12,8 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Fetches album art.
"""
"""Fetches album art."""

import os
import re

@@ -1252,10 +1251,6 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
        self.cautious = self.config["cautious"].get(bool)
        self.store_source = self.config["store_source"].get(bool)

        self.src_removed = config["import"]["delete"].get(bool) or config[
            "import"
        ]["move"].get(bool)

        self.cover_format = self.config["cover_format"].get(
            confuse.Optional(str)
        )

@@ -1297,6 +1292,10 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
            for s, c in sources
        ]

    @staticmethod
    def _is_source_file_removal_enabled():
        return config["import"]["delete"] or config["import"]["move"]

    # Asynchronous; after music is added to the library.
    def fetch_art(self, session, task):
        """Find art for the album being imported."""

@@ -1339,10 +1338,11 @@ class FetchArtPlugin(plugins.BeetsPlugin, RequestMixin):
        """Place the discovered art in the filesystem."""
        if task in self.art_candidates:
            candidate = self.art_candidates.pop(task)
            removal_enabled = FetchArtPlugin._is_source_file_removal_enabled()

            self._set_art(task.album, candidate, not self.src_removed)
            self._set_art(task.album, candidate, not removal_enabled)

            if self.src_removed:
            if removal_enabled:
                task.prune(candidate.path)

    # Manual album art fetching.

@@ -12,9 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Filter imported files using a regular expression.
"""

"""Filter imported files using a regular expression."""

import re

@@ -22,7 +22,6 @@ by default but can be added via the `-e` / `--extravalues` flag. For example:
`beet fish -e genre -e albumartist`
"""

import os
from operator import attrgetter

@@ -12,9 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Creates freedesktop.org-compliant .directory files on an album level.
"""

"""Creates freedesktop.org-compliant .directory files on an album level."""

from beets import ui
from beets.plugins import BeetsPlugin

@@ -12,8 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Moves "featured" artists to the title from the artist field.
"""
"""Moves "featured" artists to the title from the artist field."""

import re

@@ -38,7 +37,13 @@ def split_on_feat(artist):

def contains_feat(title):
    """Determine whether the title contains a "featured" marker."""
    return bool(re.search(plugins.feat_tokens(), title, flags=re.IGNORECASE))
    return bool(
        re.search(
            plugins.feat_tokens(for_artist=False),
            title,
            flags=re.IGNORECASE,
        )
    )


def find_feat_part(artist, albumartist):

@@ -78,6 +83,7 @@ class FtInTitlePlugin(plugins.BeetsPlugin):
                "auto": True,
                "drop": False,
                "format": "feat. {0}",
                "keep_in_artist": False,
            }
        )

@@ -101,10 +107,11 @@ class FtInTitlePlugin(plugins.BeetsPlugin):
        def func(lib, opts, args):
            self.config.set_args(opts)
            drop_feat = self.config["drop"].get(bool)
            keep_in_artist_field = self.config["keep_in_artist"].get(bool)
            write = ui.should_write()

            for item in lib.items(ui.decargs(args)):
                self.ft_in_title(item, drop_feat)
                self.ft_in_title(item, drop_feat, keep_in_artist_field)
                item.store()
                if write:
                    item.try_write()

@@ -115,20 +122,27 @@ class FtInTitlePlugin(plugins.BeetsPlugin):
    def imported(self, session, task):
        """Import hook for moving featuring artist automatically."""
        drop_feat = self.config["drop"].get(bool)
        keep_in_artist_field = self.config["keep_in_artist"].get(bool)

        for item in task.imported_items():
            self.ft_in_title(item, drop_feat)
            self.ft_in_title(item, drop_feat, keep_in_artist_field)
            item.store()

    def update_metadata(self, item, feat_part, drop_feat):
    def update_metadata(self, item, feat_part, drop_feat, keep_in_artist_field):
        """Choose how to add new artists to the title and set the new
        metadata. Also, print out messages about any changes that are made.
        If `drop_feat` is set, then do not add the artist to the title; just
        remove it from the artist field.
        """
        # In all cases, update the artist fields.
        self._log.info("artist: {0} -> {1}", item.artist, item.albumartist)
        item.artist = item.albumartist
        # In case the artist is kept, do not update the artist fields.
        if keep_in_artist_field:
            self._log.info(
                "artist: {0} (Not changing due to keep_in_artist)", item.artist
            )
        else:
            self._log.info("artist: {0} -> {1}", item.artist, item.albumartist)
            item.artist = item.albumartist

        if item.artist_sort:
            # Just strip the featured artist from the sort name.
            item.artist_sort, _ = split_on_feat(item.artist_sort)

@@ -142,7 +156,7 @@ class FtInTitlePlugin(plugins.BeetsPlugin):
            self._log.info("title: {0} -> {1}", item.title, new_title)
            item.title = new_title

    def ft_in_title(self, item, drop_feat):
    def ft_in_title(self, item, drop_feat, keep_in_artist_field):
        """Look for featured artists in the item's artist fields and move
        them to the title.
        """

@@ -163,6 +177,8 @@ class FtInTitlePlugin(plugins.BeetsPlugin):

        # If we have a featuring artist, move it to the title.
        if feat_part:
            self.update_metadata(item, feat_part, drop_feat)
            self.update_metadata(
                item, feat_part, drop_feat, keep_in_artist_field
            )
        else:
            self._log.info("no featuring artists found")

@@ -12,9 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Provides a fuzzy matching query.
"""

"""Provides a fuzzy matching query."""

import difflib

@@ -17,6 +17,7 @@
music player. Also allow printing the new file locations to stdout in case
one wants to manually add music to a player by its path.
"""

import datetime
import os
import re

@@ -12,9 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Shows file metadata.
"""

"""Shows file metadata."""

import os

@@ -12,8 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Allows inline path template customization code in the config file.
"""
"""Allows inline path template customization code in the config file."""

import itertools
import traceback

@@ -11,9 +11,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Adds support for ipfs. Requires go-ipfs and a running ipfs daemon
"""

"""Adds support for ipfs. Requires go-ipfs and a running ipfs daemon"""

import os
import shutil

@@ -12,9 +12,7 @@
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.

"""Uses the `KeyFinder` program to add the `initial_key` field.
"""

"""Uses the `KeyFinder` program to add the `initial_key` field."""

import os.path
import subprocess

@@ -21,6 +21,7 @@ and has been edited to remove some questionable entries.
The scraper script used is available here:
https://gist.github.com/1241307
"""

import codecs
import os
import traceback

@@ -408,9 +409,14 @@ class LastGenrePlugin(plugins.BeetsPlugin):
                for album in lib.albums(ui.decargs(args)):
                    album.genre, src = self._get_genre(album)
                    self._log.info(
                        "genre for album {0} ({1}): {0.genre}", album, src
                        'genre for album "{0.album}" ({1}): {0.genre}',
                        album,
                        src,
                    )
                    album.store()
                    if "track" in self.sources:
                        album.store(inherit=False)
                    else:
                        album.store()

                    for item in album.items():
                        # If we're using track-level sources, also look up each

@@ -419,7 +425,7 @@ class LastGenrePlugin(plugins.BeetsPlugin):
                            item.genre, src = self._get_genre(item)
                            item.store()
                            self._log.info(
                                "genre for track {0} ({1}): {0.genre}",
                                'genre for track "{0.title}" ({1}): {0.genre}',
                                item,
                                src,
                            )

@@ -431,10 +437,10 @@ class LastGenrePlugin(plugins.BeetsPlugin):
                # an album
                for item in lib.items(ui.decargs(args)):
                    item.genre, src = self._get_genre(item)
                    self._log.debug(
                        "added last.fm item genre ({0}): {1}", src, item.genre
                    )
                    item.store()
                    self._log.info(
                        "genre for track {0.title} ({1}): {0.genre}", item, src
                    )

        lastgenre_cmd.func = lastgenre_func
        return [lastgenre_cmd]

@@ -445,23 +451,32 @@ class LastGenrePlugin(plugins.BeetsPlugin):
            album = task.album
            album.genre, src = self._get_genre(album)
            self._log.debug(
                "added last.fm album genre ({0}): {1}", src, album.genre
                'genre for album "{0.album}" ({1}): {0.genre}', album, src
            )
            album.store()

            # If we're using track-level sources, store the album genre only,
            # then also look up individual track genres.
            if "track" in self.sources:
                album.store(inherit=False)
                for item in album.items():
                    item.genre, src = self._get_genre(item)
                    self._log.debug(
                        "added last.fm item genre ({0}): {1}", src, item.genre
                        'genre for track "{0.title}" ({1}): {0.genre}',
                        item,
                        src,
                    )
                    item.store()
            # Store the album genre and inherit to tracks.
            else:
                album.store()

        else:
            item = task.item
            item.genre, src = self._get_genre(item)
            self._log.debug(
                "added last.fm item genre ({0}): {1}", src, item.genre
                'genre for track "{0.title}" ({1}): {0.genre}',
                item,
                src,
            )
            item.store()

@@ -148,9 +148,6 @@ class ListenBrainzPlugin(BeetsPlugin):
         return self._make_request(url)

     def get_listenbrainz_playlists(self):
         """Returns a list of playlists created by ListenBrainz."""
-        import re
-
         resp = self.get_playlists_createdfor(self.username)
         playlists = resp.get("playlists")
         listenbrainz_playlists = []

@@ -159,35 +156,32 @@ class ListenBrainzPlugin(BeetsPlugin):
             playlist_info = playlist.get("playlist")
             if playlist_info.get("creator") == "listenbrainz":
                 title = playlist_info.get("title")
-                match = re.search(
-                    r"(Missed Recordings of \d{4}|Discoveries of \d{4})", title
+                self._log.debug(f"Playlist title: {title}")
+                playlist_type = (
+                    "Exploration" if "Exploration" in title else "Jams"
                 )
-                if "Exploration" in title:
-                    playlist_type = "Exploration"
-                elif "Jams" in title:
-                    playlist_type = "Jams"
-                elif match:
-                    playlist_type = match.group(1)
-                else:
-                    playlist_type = None
-                if "week of " in title:
+                if "week of" in title:
                     date_str = title.split("week of ")[1].split(" ")[0]
                     date = datetime.datetime.strptime(
                         date_str, "%Y-%m-%d"
                     ).date()
                 else:
-                    date = None
+                    continue
                 identifier = playlist_info.get("identifier")
                 id = identifier.split("/")[-1]
-                if playlist_type in ["Jams", "Exploration"]:
-                    listenbrainz_playlists.append(
-                        {
-                            "type": playlist_type,
-                            "date": date,
-                            "identifier": id,
-                            "title": title,
-                        }
-                    )
+                listenbrainz_playlists.append(
+                    {"type": playlist_type, "date": date, "identifier": id}
+                )
+        listenbrainz_playlists = sorted(
+            listenbrainz_playlists, key=lambda x: x["type"]
+        )
+        listenbrainz_playlists = sorted(
+            listenbrainz_playlists, key=lambda x: x["date"], reverse=True
+        )
+        for playlist in listenbrainz_playlists:
+            self._log.debug(
+                f'Playlist: {playlist["type"]} - {playlist["date"]}'
+            )
         return listenbrainz_playlists

     def get_playlist(self, identifier):

@@ -199,17 +193,20 @@ class ListenBrainzPlugin(BeetsPlugin):
         """This function returns a list of tracks in the playlist."""
         tracks = []
         for track in playlist.get("playlist").get("track"):
+            identifier = track.get("identifier")
+            if isinstance(identifier, list):
+                identifier = identifier[0]
+
             tracks.append(
                 {
-                    "artist": track.get("creator"),
-                    "identifier": track.get("identifier").split("/")[-1],
+                    "artist": track.get("creator", "Unknown artist"),
+                    "identifier": identifier.split("/")[-1],
                     "title": track.get("title"),
                 }
             )
         return self.get_track_info(tracks)

     def get_track_info(self, tracks):
         """Returns a list of track info."""
         track_info = []
         for track in tracks:
             identifier = track.get("identifier")

@@ -242,25 +239,37 @@ class ListenBrainzPlugin(BeetsPlugin):
             )
         return track_info

-    def get_weekly_playlist(self, index):
-        """Returns a list of weekly playlists based on the index."""
+    def get_weekly_playlist(self, playlist_type, most_recent=True):
+        # Fetch all playlists
         playlists = self.get_listenbrainz_playlists()
-        playlist = self.get_playlist(playlists[index].get("identifier"))
-        self._log.info(f"Getting {playlist.get('playlist').get('title')}")
+        # Filter playlists by type
+        filtered_playlists = [
+            p for p in playlists if p["type"] == playlist_type
+        ]
+        # Sort playlists by date in descending order
+        sorted_playlists = sorted(
+            filtered_playlists, key=lambda x: x["date"], reverse=True
+        )
+        # Select the most recent or older playlist based on the most_recent flag
+        selected_playlist = (
+            sorted_playlists[0] if most_recent else sorted_playlists[1]
+        )
+        self._log.debug(
+            f"Selected playlist: {selected_playlist['type']} "
+            f"- {selected_playlist['date']}"
+        )
+        # Fetch and return tracks from the selected playlist
+        playlist = self.get_playlist(selected_playlist.get("identifier"))
         return self.get_tracks_from_playlist(playlist)

     def get_weekly_exploration(self):
         """Returns a list of weekly exploration."""
-        return self.get_weekly_playlist(0)
+        return self.get_weekly_playlist("Exploration", most_recent=True)

     def get_weekly_jams(self):
         """Returns a list of weekly jams."""
-        return self.get_weekly_playlist(1)
+        return self.get_weekly_playlist("Jams", most_recent=True)

     def get_last_weekly_exploration(self):
         """Returns a list of weekly exploration."""
-        return self.get_weekly_playlist(3)
+        return self.get_weekly_playlist("Exploration", most_recent=False)

     def get_last_weekly_jams(self):
         """Returns a list of weekly jams."""
-        return self.get_weekly_playlist(3)
+        return self.get_weekly_playlist("Jams", most_recent=False)
@@ -12,9 +12,7 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-"""Load SQLite extensions.
-"""
-
+"""Load SQLite extensions."""

 import sqlite3

@@ -12,9 +12,9 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-"""Fetches, embeds, and displays lyrics.
-"""
+"""Fetches, embeds, and displays lyrics."""
+
+from __future__ import annotations

 import difflib
 import errno

@@ -24,8 +24,10 @@ import os.path
 import re
 import struct
 import unicodedata
-import urllib
 import warnings
+from functools import partial
+from typing import ClassVar
+from urllib.parse import quote, urlencode

 import requests
+from unidecode import unidecode

@@ -48,26 +50,11 @@ except ImportError:

 import beets
 from beets import plugins, ui
 from beets.autotag.hooks import string_dist

 DIV_RE = re.compile(r"<(/?)div>?", re.I)
 COMMENT_RE = re.compile(r"<!--.*-->", re.S)
 TAG_RE = re.compile(r"<[^>]*>")
 BREAK_RE = re.compile(r"\n?\s*<br([\s|/][^>]*)*>\s*\n?", re.I)
-URL_CHARACTERS = {
-    "\u2018": "'",
-    "\u2019": "'",
-    "\u201c": '"',
-    "\u201d": '"',
-    "\u2010": "-",
-    "\u2011": "-",
-    "\u2012": "-",
-    "\u2013": "-",
-    "\u2014": "-",
-    "\u2015": "-",
-    "\u2016": "-",
-    "\u2026": "...",
-}
 USER_AGENT = f"beets/{beets.__version__}"

 # The content for the base index.rst generated in ReST mode.

@@ -235,21 +222,6 @@ class Backend:
         self._log = log
         self.config = config

-    @staticmethod
-    def _encode(s):
-        """Encode the string for inclusion in a URL"""
-        if isinstance(s, str):
-            for char, repl in URL_CHARACTERS.items():
-                s = s.replace(char, repl)
-            s = s.encode("utf-8", "ignore")
-        return urllib.parse.quote(s)
-
-    def build_url(self, artist, title):
-        return self.URL_PATTERN % (
-            self._encode(artist.title()),
-            self._encode(title.title()),
-        )
-
     def fetch_url(self, url):
         """Retrieve the content at a given URL, or return None if the source
         is unreachable.

@@ -305,12 +277,29 @@ class LRCLib(Backend):
             return None

         if self.config["synced"]:
-            return data.get("syncedLyrics")
+            return data.get("syncedLyrics") or data.get("plainLyrics")

         return data.get("plainLyrics")


-class MusiXmatch(Backend):
+class DirectBackend(Backend):
+    """A backend for fetching lyrics directly."""
+
+    URL_TEMPLATE: ClassVar[str]  #: May include formatting placeholders
+
+    @classmethod
+    def encode(cls, text: str) -> str:
+        """Encode the string for inclusion in a URL."""
+        raise NotImplementedError
+
+    @classmethod
+    def build_url(cls, *args: str) -> str:
+        return cls.URL_TEMPLATE.format(*map(cls.encode, args))
+
+
+class MusiXmatch(DirectBackend):
+    URL_TEMPLATE = "https://www.musixmatch.com/lyrics/{}/{}"
+
     REPLACEMENTS = {
         r"\s+": "-",
         "<": "Less_Than",

@@ -320,14 +309,12 @@ class MusiXmatch(Backend):
         r"[\]\}]": ")",
     }

-    URL_PATTERN = "https://www.musixmatch.com/lyrics/%s/%s"
-
     @classmethod
-    def _encode(cls, s):
+    def encode(cls, text: str) -> str:
         for old, new in cls.REPLACEMENTS.items():
-            s = re.sub(old, new, s)
+            text = re.sub(old, new, text)

-        return super()._encode(s)
+        return quote(unidecode(text))

     def fetch(self, artist, title, album=None, length=None):
         url = self.build_url(artist, title)
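The new ``DirectBackend`` is a small template-method pattern: subclasses supply ``URL_TEMPLATE`` and ``encode``, and ``build_url`` encodes each positional argument into the template. A standalone sketch with a hypothetical backend (not part of beets):

.. code-block:: python

    from typing import ClassVar
    from urllib.parse import quote


    class DirectBackend:
        URL_TEMPLATE: ClassVar[str]  # may include {} placeholders

        @classmethod
        def encode(cls, text: str) -> str:
            raise NotImplementedError

        @classmethod
        def build_url(cls, *args: str) -> str:
            # Encode every positional argument, then fill the template.
            return cls.URL_TEMPLATE.format(*map(cls.encode, args))


    class Example(DirectBackend):
        URL_TEMPLATE = "https://example.invalid/lyrics/{}/{}"

        @classmethod
        def encode(cls, text: str) -> str:
            return quote(text)


    print(Example.build_url("Jimi Hendrix", "Hey Joe"))
    # https://example.invalid/lyrics/Jimi%20Hendrix/Hey%20Joe
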
@@ -496,90 +483,34 @@ class Genius(Backend):
         return lyrics_div.get_text()


-class Tekstowo(Backend):
-    # Fetch lyrics from Tekstowo.pl.
-    REQUIRES_BS = True
+class Tekstowo(DirectBackend):
+    """Fetch lyrics from Tekstowo.pl."""

-    BASE_URL = "http://www.tekstowo.pl"
-    URL_PATTERN = BASE_URL + "/wyszukaj.html?search-title=%s&search-artist=%s"
+    REQUIRES_BS = True
+    URL_TEMPLATE = "https://www.tekstowo.pl/piosenka,{},{}.html"

+    non_alpha_to_underscore = partial(re.compile(r"\W").sub, "_")
+
+    @classmethod
+    def encode(cls, text: str) -> str:
+        return cls.non_alpha_to_underscore(unidecode(text.lower()))
+
     def fetch(self, artist, title, album=None, length=None):
-        url = self.build_url(title, artist)
-        search_results = self.fetch_url(url)
-        if not search_results:
-            return None
+        if html := self.fetch_url(self.build_url(artist, title)):
+            return self.extract_lyrics(html)

-        song_page_url = self.parse_search_results(search_results)
-        if not song_page_url:
-            return None
+        return None

-        song_page_html = self.fetch_url(song_page_url)
-        if not song_page_html:
-            return None
-
-        return self.extract_lyrics(song_page_html, artist, title)
-
-    def parse_search_results(self, html):
+    def extract_lyrics(self, html: str) -> str | None:
         html = _scrape_strip_cruft(html)
         html = _scrape_merge_paragraphs(html)

         soup = try_parse_html(html)
         if not soup:
             return None

-        content_div = soup.find("div", class_="content")
-        if not content_div:
-            return None
+        if lyrics_div := soup.select_one("div.song-text > div.inner-text"):
+            return lyrics_div.get_text()

-        card_div = content_div.find("div", class_="card")
-        if not card_div:
-            return None
-
-        song_rows = card_div.find_all("div", class_="box-przeboje")
-        if not song_rows:
-            return None
-
-        song_row = song_rows[0]
-        if not song_row:
-            return None
-
-        link = song_row.find("a")
-        if not link:
-            return None
-
-        return self.BASE_URL + link.get("href")
-
-    def extract_lyrics(self, html, artist, title):
-        html = _scrape_strip_cruft(html)
-        html = _scrape_merge_paragraphs(html)
-
-        soup = try_parse_html(html)
-        if not soup:
-            return None
-
-        info_div = soup.find("div", class_="col-auto")
-        if not info_div:
-            return None
-
-        info_elements = info_div.find_all("a")
-        if not info_elements:
-            return None
-
-        html_title = info_elements[-1].get_text()
-        html_artist = info_elements[-2].get_text()
-
-        title_dist = string_dist(html_title, title)
-        artist_dist = string_dist(html_artist, artist)
-
-        thresh = self.config["dist_thresh"].get(float)
-        if title_dist > thresh or artist_dist > thresh:
-            return None
-
-        lyrics_div = soup.select("div.song-text > div.inner-text")
-        if not lyrics_div:
-            return None
-
-        return lyrics_div[0].get_text()
+        return None


 def remove_credits(text):
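The rewritten ``extract_lyrics`` collapses the old chain of ``find()`` calls and ``None`` checks into one CSS selector plus an assignment expression. A sketch of the pattern, assuming ``beautifulsoup4`` is installed:

.. code-block:: python

    from bs4 import BeautifulSoup

    html = """
    <div class="song-text">
      <div class="inner-text">Hello darkness, my old friend</div>
    </div>
    """

    soup = BeautifulSoup(html, "html.parser")
    # The walrus operator binds and tests in one step; select_one returns
    # None when the selector matches nothing.
    if lyrics_div := soup.select_one("div.song-text > div.inner-text"):
        print(lyrics_div.get_text().strip())
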
@@ -741,7 +672,7 @@ class Google(Backend):
         url = "https://www.googleapis.com/customsearch/v1?key=%s&cx=%s&q=%s" % (
             self.api_key,
             self.engine_id,
-            urllib.parse.quote(query.encode("utf-8")),
+            quote(query.encode("utf-8")),
         )

         data = self.fetch_url(url)

@@ -888,7 +819,7 @@ class LyricsPlugin(plugins.BeetsPlugin):
         oauth_token = json.loads(
             requests.post(
                 oauth_url,
-                data=urllib.parse.urlencode(params),
+                data=urlencode(params),
                 timeout=10,
             ).content
         )

@@ -1063,7 +994,7 @@ class LyricsPlugin(plugins.BeetsPlugin):
             if any(lyrics):
                 break

-        lyrics = "\n\n---\n\n".join([l for l in lyrics if l])
+        lyrics = "\n\n---\n\n".join(filter(None, lyrics))

         if lyrics:
             self._log.info("fetched lyrics: {0}", item)
@@ -12,8 +12,7 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-"""Update library's tags using MusicBrainz.
-"""
+"""Update library's tags using MusicBrainz."""

 import re
 from collections import defaultdict

@@ -12,9 +12,7 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-"""Synchronize information from music player libraries
-"""
-
+"""Synchronize information from music player libraries"""

 from abc import ABCMeta, abstractmethod
 from importlib import import_module

@@ -126,8 +124,7 @@ class MetaSyncPlugin(BeetsPlugin):
                 meta_source_instances[player] = cls(self.config, self._log)
             except (ImportError, ConfigValueError) as e:
                 self._log.error(
-                    "Failed to instantiate metadata source "
-                    "'{}': {}".format(player, e)
+                    f"Failed to instantiate metadata source {player!r}: {e}"
                 )

         # Avoid needlessly iterating over items

@@ -12,9 +12,7 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-"""Synchronize information from amarok's library via dbus
-"""
-
+"""Synchronize information from amarok's library via dbus"""

 from datetime import datetime
 from os.path import basename

@@ -12,9 +12,7 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-"""Synchronize information from iTunes's library
-"""
-
+"""Synchronize information from iTunes's library"""

 import os
 import plistlib

@@ -13,8 +13,7 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-"""List missing tracks.
-"""
+"""List missing tracks."""

 from collections import defaultdict

@@ -16,7 +16,6 @@
 and work composition date
 """

-
 import musicbrainzngs

 from beets import ui

@@ -12,8 +12,7 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-"""Send the results of a query to the configured music player as a playlist.
-"""
+"""Send the results of a query to the configured music player as a playlist."""

 import shlex
 import subprocess

@@ -197,7 +196,7 @@ class PlayPlugin(BeetsPlugin):
         filename = get_temp_filename(__name__, suffix=".m3u")
         with open(filename, "wb") as m3u:
             if utf8_bom:
-                m3u.write(b"\xEF\xBB\xBF")
+                m3u.write(b"\xef\xbb\xbf")

             for item in paths_list:
                 m3u.write(item + b"\n")
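The byte-literal change is purely cosmetic: hex escapes are case-insensitive in value, and both spellings are the UTF-8 encoding of U+FEFF, the byte order mark:

.. code-block:: python

    assert b"\xEF\xBB\xBF" == b"\xef\xbb\xbf" == "\ufeff".encode("utf-8")
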
@@ -15,7 +15,7 @@
 import fnmatch
 import os
 import tempfile
-from typing import Sequence
+from collections.abc import Sequence

 import beets
 from beets.dbcore.query import InQuery

@@ -12,8 +12,7 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-"""Get a random song or album from the library.
-"""
+"""Get a random song or album from the library."""

 from beets.plugins import BeetsPlugin
 from beets.random import random_objs

@@ -13,10 +13,11 @@
 # included in all copies or substantial portions of the Software.


+from __future__ import annotations
+
 import collections
 import enum
 import math
-import optparse
 import os
 import queue
 import signal

@@ -25,32 +26,24 @@ import sys
 import warnings
 from abc import ABC, abstractmethod
 from dataclasses import dataclass
-from logging import Logger
 from multiprocessing.pool import ThreadPool
 from threading import Event, Thread
-from typing import (
-    Any,
-    Callable,
-    DefaultDict,
-    Dict,
-    List,
-    Optional,
-    Sequence,
-    Tuple,
-    Type,
-    TypeVar,
-    Union,
-    cast,
-)
-
-from confuse import ConfigView
+from typing import TYPE_CHECKING, Any, Callable, TypeVar, cast

 from beets import ui
-from beets.importer import ImportSession, ImportTask
-from beets.library import Album, Item, Library
 from beets.plugins import BeetsPlugin
 from beets.util import command_output, displayable_path, syspath

+if TYPE_CHECKING:
+    import optparse
+    from collections.abc import Sequence
+    from logging import Logger
+
+    from confuse import ConfigView
+
+    from beets.importer import ImportSession, ImportTask
+    from beets.library import Album, Item, Library
+
 # Utilities.


@@ -69,7 +62,7 @@ class FatalGstreamerPluginReplayGainError(FatalReplayGainError):
     loading the required plugins."""


-def call(args: List[Any], log: Logger, **kwargs: Any):
+def call(args: list[Any], log: Logger, **kwargs: Any):
     """Execute the command and return its output or raise a
     ReplayGainError on failure.
     """

@@ -134,9 +127,9 @@ class RgTask:
     def __init__(
         self,
         items: Sequence[Item],
-        album: Optional[Album],
+        album: Album | None,
         target_level: float,
-        peak_method: Optional[PeakMethod],
+        peak_method: PeakMethod | None,
         backend_name: str,
         log: Logger,
     ):

@@ -146,8 +139,8 @@ class RgTask:
         self.peak_method = peak_method
         self.backend_name = backend_name
         self._log = log
-        self.album_gain: Optional[Gain] = None
-        self.track_gains: Optional[List[Gain]] = None
+        self.album_gain: Gain | None = None
+        self.track_gains: list[Gain] | None = None

     def _store_track_gain(self, item: Item, track_gain: Gain):
         """Store track gain for a single item in the database."""

@@ -236,7 +229,7 @@ class R128Task(RgTask):
     def __init__(
         self,
         items: Sequence[Item],
-        album: Optional[Album],
+        album: Album | None,
         target_level: float,
         backend_name: str,
         log: Logger,

@@ -334,9 +327,7 @@ class FfmpegBackend(Backend):
                 task.target_level,
                 task.peak_method,
                 count_blocks=False,
-            )[
-                0
-            ]  # take only the gain, discarding number of gating blocks
+            )[0]  # take only the gain, discarding number of gating blocks
             for item in task.items
         ]


@@ -350,7 +341,7 @@ class FfmpegBackend(Backend):

         # analyse tracks
         # Gives a list of tuples (track_gain, track_n_blocks)
-        track_results: List[Tuple[Gain, int]] = [
+        track_results: list[tuple[Gain, int]] = [
             self._analyse_item(
                 item,
                 task.target_level,

@@ -360,7 +351,7 @@ class FfmpegBackend(Backend):
             for item in task.items
         ]

-        track_gains: List[Gain] = [tg for tg, _nb in track_results]
+        track_gains: list[Gain] = [tg for tg, _nb in track_results]

         # Album peak is maximum track peak
         album_peak = max(tg.peak for tg in track_gains)

@@ -411,8 +402,8 @@ class FfmpegBackend(Backend):
         return task

     def _construct_cmd(
-        self, item: Item, peak_method: Optional[PeakMethod]
-    ) -> List[Union[str, bytes]]:
+        self, item: Item, peak_method: PeakMethod | None
+    ) -> list[str | bytes]:
         """Construct the shell command to analyse items."""
         return [
             self._ffmpeg_path,

@@ -435,9 +426,9 @@ class FfmpegBackend(Backend):
         self,
         item: Item,
         target_level: float,
-        peak_method: Optional[PeakMethod],
+        peak_method: PeakMethod | None,
         count_blocks: bool = True,
-    ) -> Tuple[Gain, int]:
+    ) -> tuple[Gain, int]:
         """Analyse item. Return a pair of a Gain object and the number
         of gating blocks above the threshold.


@@ -649,7 +640,7 @@ class CommandBackend(Backend):
         items: Sequence[Item],
         target_level: float,
         is_album: bool,
-    ) -> List[Gain]:
+    ) -> list[Gain]:
         """Computes the track or album gain of a list of items, returns
         a list of TrackGain objects.
         """

@@ -669,7 +660,7 @@ class CommandBackend(Backend):
         # tag-writing; this turns the mp3gain/aacgain tool into a gain
         # calculator rather than a tag manipulator because we take care
         # of changing tags ourselves.
-        cmd: List[Union[bytes, str]] = [self.command, "-o", "-s", "s"]
+        cmd: list[bytes | str] = [self.command, "-o", "-s", "s"]
         if self.noclip:
             # Adjust to avoid clipping.
             cmd = cmd + ["-k"]

@@ -687,7 +678,7 @@ class CommandBackend(Backend):
             output, len(items) + (1 if is_album else 0)
         )

-    def parse_tool_output(self, text: bytes, num_lines: int) -> List[Gain]:
+    def parse_tool_output(self, text: bytes, num_lines: int) -> list[Gain]:
         """Given the tab-delimited output from an invocation of mp3gain
         or aacgain, parse the text and return a list of dictionaries
         containing information about each analyzed file.

@@ -773,7 +764,7 @@ class GStreamerBackend(Backend):

         self._main_loop = self.GLib.MainLoop()

-        self._files: List[bytes] = []
+        self._files: list[bytes] = []

     def _import_gst(self):
         """Import the necessary GObject-related modules and assign `Gst`

@@ -813,7 +804,7 @@ class GStreamerBackend(Backend):
         self._files = [i.path for i in items]

         # FIXME: Turn this into DefaultDict[bytes, Gain]
-        self._file_tags: DefaultDict[bytes, Dict[str, float]] = (
+        self._file_tags: collections.defaultdict[bytes, dict[str, float]] = (
             collections.defaultdict(dict)
         )

@@ -1194,20 +1185,20 @@ class ExceptionWatcher(Thread):
             # whether `_stopevent` is set
             pass

-    def join(self, timeout: Optional[float] = None):
+    def join(self, timeout: float | None = None):
         self._stopevent.set()
         Thread.join(self, timeout)


 # Main plugin logic.

-BACKEND_CLASSES: List[Type[Backend]] = [
+BACKEND_CLASSES: list[type[Backend]] = [
     CommandBackend,
     GStreamerBackend,
     AudioToolsBackend,
     FfmpegBackend,
 ]
-BACKENDS: Dict[str, Type[Backend]] = {b.NAME: b for b in BACKEND_CLASSES}
+BACKENDS: dict[str, type[Backend]] = {b.NAME: b for b in BACKEND_CLASSES}


 class ReplayGainPlugin(BeetsPlugin):

@@ -1334,7 +1325,7 @@ class ReplayGainPlugin(BeetsPlugin):
         self,
         items: Sequence[Item],
         use_r128: bool,
-        album: Optional[Album] = None,
+        album: Album | None = None,
     ) -> RgTask:
         if use_r128:
             return R128Task(

@@ -1377,7 +1368,7 @@ class ReplayGainPlugin(BeetsPlugin):

         self._log.info("analyzing {0}", album)

-        discs: Dict[int, List[Item]] = {}
+        discs: dict[int, list[Item]] = {}
         if self.config["per_disc"].get(bool):
             for item in album.items():
                 if discs.get(item.disc) is None:

@@ -1449,8 +1440,8 @@ class ReplayGainPlugin(BeetsPlugin):
     def _apply(
         self,
         func: Callable[..., AnyRgTask],
-        args: List[Any],
-        kwds: Dict[str, Any],
+        args: list[Any],
+        kwds: dict[str, Any],
         callback: Callable[[AnyRgTask], Any],
     ):
         if self.pool is not None:

@@ -1527,7 +1518,7 @@ class ReplayGainPlugin(BeetsPlugin):
         self,
         lib: Library,
         opts: optparse.Values,
-        args: List[str],
+        args: list[str],
     ):
         try:
             write = ui.should_write(opts.write)

@@ -1564,7 +1555,7 @@ class ReplayGainPlugin(BeetsPlugin):
             # Silence interrupt exceptions
             pass

-    def commands(self) -> List[ui.Subcommand]:
+    def commands(self) -> list[ui.Subcommand]:
         """Return the "replaygain" ui subcommand."""
         cmd = ui.Subcommand("replaygain", help="analyze for ReplayGain")
         cmd.parser.add_album_option()
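This sweep swaps ``typing.List``/``Optional``/``Union`` for builtin generics and PEP 604 unions, which is why the file now starts with ``from __future__ import annotations`` (string-ified annotations keep ``X | None`` safe on Python 3.9). A minimal before/after sketch:

.. code-block:: python

    from __future__ import annotations

    from typing import List, Optional  # old spelling, now redundant


    def old_style(paths: Optional[List[str]]) -> List[str]:
        return paths or []


    def new_style(paths: list[str] | None) -> list[str]:
        return paths or []


    assert old_style(None) == new_style(None) == []
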
@@ -16,7 +16,6 @@
 automatically whenever tags are written.
 """

-
 import mediafile
 import mutagen

@@ -12,9 +12,7 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-"""Generates smart playlists based on beets queries.
-"""
-
+"""Generates smart playlists based on beets queries."""

 import json
 import os

@@ -34,8 +34,7 @@ class Substitute(BeetsPlugin):
         """Do the actual replacing."""
         if text:
             for pattern, replacement in self.substitute_rules:
-                if pattern.match(text.lower()):
-                    return replacement
+                text = pattern.sub(replacement, text)
             return text
         else:
             return ""

@@ -47,10 +46,8 @@ class Substitute(BeetsPlugin):
         substitute rules.
         """
         super().__init__()
-        self.substitute_rules = []
         self.template_funcs["substitute"] = self.tmpl_substitute

-        for key, view in self.config.items():
-            value = view.as_str()
-            pattern = re.compile(key.lower())
-            self.substitute_rules.append((pattern, value))
+        self.substitute_rules = [
+            (re.compile(key, flags=re.IGNORECASE), value)
+            for key, value in self.config.flatten().items()
+        ]
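The new rules compile each configured pattern with ``re.IGNORECASE`` and apply ``pattern.sub``, so replacements may reference capture groups instead of being fixed strings. A standalone sketch of one such rule:

.. code-block:: python

    import re

    pattern, replacement = re.compile(r"^(.*?)(,| &| and).*", re.IGNORECASE), r"\1"

    print(pattern.sub(replacement, "Bob Dylan and The Band"))    # Bob Dylan
    print(pattern.sub(replacement, "Neil Young & Crazy Horse"))  # Neil Young
    print(pattern.sub(replacement, "Nina Simone"))               # unchanged
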
@@ -14,9 +14,7 @@

 """Moves patterns in path formats (suitable for moving articles)."""

-
 import re
-from typing import List

 from beets.plugins import BeetsPlugin

@@ -29,7 +27,7 @@ FORMAT = "{0}, {1}"


 class ThePlugin(BeetsPlugin):
-    patterns: List[str] = []
+    patterns: list[str] = []

     def __init__(self):
         super().__init__()

@@ -18,7 +18,6 @@ This plugin is POSIX-only.
 Spec: standards.freedesktop.org/thumbnail-spec/latest/index.html
 """

-
 import ctypes
 import ctypes.util
 import os

@@ -280,8 +279,7 @@ class GioURI(URIGetter):
         if not uri_ptr:
             self.libgio.g_free(uri_ptr)
             raise RuntimeError(
-                "No URI received from the gfile pointer for "
-                "{}".format(displayable_path(path))
+                f"No URI received from the gfile pointer for {displayable_path(path)}"
             )

         try:

@@ -231,7 +231,7 @@ def _get_unique_table_field_values(model, field, sort_field):
         raise KeyError
     with g.lib.transaction() as tx:
         rows = tx.query(
-            'SELECT DISTINCT "{}" FROM "{}" ORDER BY "{}"'.format(
+            "SELECT DISTINCT '{}' FROM '{}' ORDER BY '{}'".format(
                 field, model._table, sort_field
             )
         )

@@ -12,8 +12,7 @@
 # The above copyright notice and this permission notice shall be
 # included in all copies or substantial portions of the Software.

-""" Clears tag fields in media files."""
-
+"""Clears tag fields in media files."""

 import re

@@ -1,44 +1,152 @@
 Changelog
 =========

+Changelog goes here! Please add your entry to the bottom of one of the lists below!
+
 Unreleased
 ----------

-Changelog goes here! Please add your entry to the bottom of one of the lists below!
+Beets now requires Python 3.9 or later since support for EOL Python 3.8 has
+been dropped.

 New features:

-* Ability to query albums with track db fields and vice-versa, for example
-  `beet list -a title:something` or `beet list artpath:cover`. Consequently
-  album queries involving `path` field have been sped up, like `beet list -a
-  path:/path/`.
+
 Bug fixes:

+* :doc:`plugins/lyrics`: LRCLib will fallback to plain lyrics if synced lyrics
+  are not found and `synced` flag is set to `yes`.
+* Synchronise files included in the source distribution with what we used to
+  have before the introduction of Poetry.
+  :bug:`5531`
+  :bug:`5526`
+* :ref:`write-cmd`: Fix the issue where for certain files differences in
+  ``mb_artistid``, ``mb_albumartistid`` and ``albumtype`` fields are shown on
+  every attempt to write tags. Note: your music needs to be reimported with
+  ``beet import -LI`` or synchronised with ``beet mbsync`` in order to fix
+  this!
+  :bug:`5265`
+  :bug:`5371`
+  :bug:`4715`
+* :ref:`import-cmd`: Fix ``MemoryError`` and improve performance tagging large
+  albums by replacing ``munkres`` library with ``lap.lapjv``.
+  :bug:`5207`
+* :ref:`query-sort`: Fix a bug that would raise an exception when sorting on
+  a non-string field that is not populated in all items.
+  :bug:`5512`
+* :doc:`plugins/lastgenre`: Fix track-level genre handling. Now when an album-level
+  genre is set already, single tracks don't fall back to the album's genre and
+  request their own last.fm genre. Also log messages regarding what's been
+  tagged are now more polished.
+  :bug:`5582`

 For packagers:

+* The minimum supported Python version is now 3.9.

 Other changes:

+* Release workflow: fix the issue where the new release tag is created for the
+  wrong (outdated) commit. Now the tag is created in the same workflow step
+  right after committing the version update.
+  :bug:`5539`
+
+2.2.0 (December 02, 2024)
+-------------------------
+
+New features:
+
+* :doc:`/plugins/substitute`: Allow the replacement string to use capture groups
+  from the match. It is thus possible to create more general rules, applying to
+  many different artists at once.
+
 * Ability to use relative_to as "m3u" to set playlist files as relative to where each playlist is at, including subdirectories.

 Bug fixes:

 * Improved naming of temporary files by separating the random part with the file extension.
 * Fixed the ``auto`` value for the :ref:`reflink` config option.
 * Fixed lyrics plugin only getting part of the lyrics from ``Genius.com`` :bug:`4815`
 * Check if running python from the Microsoft Store and provide feedback to install
   from python.org.
   :bug:`5467`
 * Fix bug where matcher doesn't consider medium number when importing. This makes
   it difficult to import hybrid SACDs and other releases with duplicate tracks.
   :bug:`5148`
 * Bring back test files and the manual to the source distribution tarball.
   :bug:`5513`

 Other changes:

 * Changed `bitesize` label to `good first issue`. Our `contribute`_ page is now
   automatically populated with these issues. :bug:`4855`

 .. _contribute: https://github.com/beetbox/beets/contribute

+2.1.0 (November 22, 2024)
+-------------------------
+
+New features:
+
+* New template function added: ``%capitalize``. Converts the first letter of
+  the text to uppercase and the rest to lowercase.
+* Ability to query albums with track db fields and vice-versa, for example
+  ``beet list -a title:something`` or ``beet list artpath:cover``. Consequently
+  album queries involving ``path`` field have been sped up, like ``beet list -a
+  path:/path/``.
+* :doc:`plugins/ftintitle`: New ``keep_in_artist`` option for the plugin, which
+  allows keeping the "feat." part in the artist metadata while still changing
+  the title.
+* :doc:`plugins/autobpm`: Add new configuration option ``beat_track_kwargs``
+  which enables adjusting keyword arguments supplied to librosa's
+  ``beat_track`` function call.
+* Beets now uses ``platformdirs`` to determine the default music directory.
+  This location varies between systems -- for example, users can configure it
+  on Unix systems via ``user-dirs.dirs(5)``.
+
+Bug fixes:
+
+* :doc:`plugins/ftintitle`: The detection of a "feat. X" part in a song title does not produce any false
+  positives caused by words like "and" or "with" anymore. :bug:`5441`
+* :doc:`plugins/ftintitle`: The detection of a "feat. X" part now also matches such parts if they are in
+  parentheses or brackets. :bug:`5436`
+* Improve naming of temporary files by separating the random part with the file extension.
+* Fix the ``auto`` value for the :ref:`reflink` config option.
+* Fix lyrics plugin only getting part of the lyrics from ``Genius.com`` :bug:`4815`
+* Album flexible fields are now correctly saved. For instance MusicBrainz external links
+  such as `bandcamp_album_id` will be available on albums in addition to tracks.
+  For albums already in your library, a re-import is required for the fields to be added.
+  Such a re-import can be done with, in this case, `beet import -L data_source:=MusicBrainz`.
+* :doc:`plugins/autobpm`: Fix the ``TypeError`` where tempo was being returned
+  as a numpy array. Update ``librosa`` dependency constraint to prevent similar
+  issues in the future.
+  :bug:`5289`
+* :doc:`plugins/discogs`: Fix the ``TypeError`` when there is no description.
+* Use single quotes in all SQL queries
+  :bug:`4709`
+* :doc:`plugins/lyrics`: Update ``tekstowo`` backend to fetch lyrics directly
+  since recent updates to their website made it unsearchable.
+  :bug:`5456`
+* :doc:`plugins/convert`: Fixed the convert plugin ``no_convert`` option so
+  that it no longer treats "and" and "or" queries the same. To maintain
+  previous behaviour add commas between your query keywords. For help see
+  :ref:`combiningqueries`.
+* Fix the ``TypeError`` when :ref:`set_fields` is provided non-string values. :bug:`4840`

 For packagers:

 * The minimum supported Python version is now 3.8.
-* The `beet` script has been removed from the repository.
-* The `typing_extensions` is required for Python 3.10 and below.
+* The ``beet`` script has been removed from the repository.
+* The ``typing_extensions`` is required for Python 3.10 and below.

 Other changes:

-* :doc:`contributing`: The project now uses `poetry` for packaging and
+* :doc:`contributing`: The project now uses ``poetry`` for packaging and
   dependency management. This change affects project management and mostly
   affects beets developers. Please see updates in :ref:`getting-the-source` and
   :ref:`testing` for more information.
-* :doc:`contributing`: Since `poetry` now manages local virtual environments,
-  `tox` has been replaced by a task runner `poethepoet`. This change affects
+* :doc:`contributing`: Since ``poetry`` now manages local virtual environments,
+  `tox` has been replaced by a task runner ``poethepoet``. This change affects
   beets developers and contributors. Please see updates in the
   :ref:`development-tools` section for more details. Type ``poe`` while in
   the project directory to see the available commands.
 * Installation instructions have been made consistent across plugins
-  documentation. Users should simply install `beets` with an `extra` of the
+  documentation. Users should simply install ``beets`` with an ``extra`` of the
   corresponding plugin name in order to install extra dependencies for that
   plugin.
 * GitHub workflows have been reorganised for clarity: style, linting, type and
@@ -49,6 +157,16 @@ Other changes:
   documentation is changed, and they only check the changed files. When
   dependencies are updated (``poetry.lock``), then the entire code base is
   checked.
+* The long-deprecated ``beets.util.confit`` module has been removed. This may
+  cause extremely outdated external plugins to fail to load.
+* :doc:`plugins/autobpm`: Add plugin dependencies to ``pyproject.toml`` under
+  the ``autobpm`` extra and update the plugin installation instructions in the
+  docs.
+  Since importing the bpm calculation functionality from ``librosa`` takes
+  around 4 seconds, update the plugin to only do so when it actually needs to
+  calculate the bpm. Previously this import was being done immediately, so
+  every ``beet`` invocation was being delayed by a couple of seconds.
+  :bug:`5185`

 2.0.0 (May 30, 2024)
 --------------------

@@ -210,9 +328,14 @@ New features:
 * Add support for `barcode` field.
   :bug:`3172`
 * :doc:`/plugins/smartplaylist`: Add new config option `smartplaylist.fields`.
+* :doc:`/plugins/fetchart`: Defer source removal config option evaluation to
+  the point where they are used really, supporting temporary config changes.

 Bug fixes:

+* Improve ListenBrainz error handling.
+  :bug:`5459`
 * :doc:`/plugins/deezer`: Improve requests error handling.
 * :doc:`/plugins/lastimport`: Improve error handling in the `process_tracks` function and enable it to be used with other plugins.
 * :doc:`/plugins/spotify`: Improve handling of ConnectionError.
+* :doc:`/plugins/deezer`: Improve Deezer plugin error handling and set requests timeout to 10 seconds.

@@ -349,7 +472,7 @@ Bug fixes:
   :bug:`4947`
 * Fix bug where unimported plugin would not ignore children directories of
   ignored directories.
-   :bug:`5130`
+  :bug:`5130`
 * Fix bug where some plugin commands hang indefinitely due to a missing
   `requests` timeout.
 * Fix cover art resizing logic to support multiple steps of resizing

@@ -11,8 +11,8 @@ master_doc = "index"
 project = "beets"
 copyright = "2016, Adrian Sampson"

-version = "2.0"
-release = "2.0.0"
+version = "2.2"
+release = "2.2.0"

 pygments_style = "sphinx"

@@ -542,6 +542,9 @@ Specifying types has several advantages:

 * User input for flexible fields may be validated and converted.

+* Items missing the given field can use an appropriate null value for
+  querying and sorting purposes.
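
As a sketch of how a plugin can opt into typed fields, a hypothetical plugin might declare a flexible field's type through the ``item_types`` hook (the field name here is illustrative):

.. code-block:: python

    from beets import dbcore
    from beets.plugins import BeetsPlugin


    class PlayCountPlugin(BeetsPlugin):
        # With a declared type, queries such as `beet ls play_count:10..`
        # compare numerically, and missing values get an integer null.
        item_types = {"play_count": dbcore.types.INTEGER}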

.. _plugin-logging:

@@ -109,7 +109,7 @@ get it right:
    should open the "System Properties" screen, then select the "Advanced" tab,
    then hit the "Environmental Variables..." button, and then look for the PATH
    variable in the table. Add the following to the end of the variable's value:
-   ``;C:\Python37;C:\Python37\Scripts``. You may need to adjust these paths to
+   ``;C:\Python38;C:\Python38\Scripts``. You may need to adjust these paths to
    point to your Python installation.

 3. Now install beets by running: ``pip install beets``

@@ -132,6 +132,19 @@ trouble or you have more detail to contribute here, please direct it to
 .. _install pip: https://pip.pypa.io/en/stable/installing/
 .. _get-pip.py: https://bootstrap.pypa.io/get-pip.py

+Installing on ARM (Raspberry Pi and similar)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Beets on ARM devices is not recommended for Linux novices. If you are
+comfortable with light troubleshooting in tools like ``pip``, ``make``,
+and beets' command-line binary dependencies (e.g. ``ffmpeg`` and
+``ImageMagick``), you will probably be okay on ARM devices like the
+Raspberry Pi. We have `notes for ARM`_ and an `older ARM reference`_.
+Beets is generally developed on x86-64 based devices, and most plugins
+target that platform as well.
+
+.. _notes for ARM: https://github.com/beetbox/beets/discussions/4910
+.. _older ARM reference: https://discourse.beets.io/t/diary-of-beets-on-arm-odroid-hc4-armbian/1993
+
 Configuring
 -----------

@@ -6,8 +6,15 @@ of a track from its audio data and store it in the `bpm` field of your
 database. It does so automatically when importing music or through
 the ``beet autobpm [QUERY]`` command.

-To use the ``autobpm`` plugin, enable it in your configuration (see
-:ref:`using-plugins`).
+Install
+-------
+
+To use the ``autobpm`` plugin, first enable it in your configuration (see
+:ref:`using-plugins`). Then, install ``beets`` with ``autobpm`` extra
+
+.. code-block:: bash
+
+    pip install "beets[autobpm]"

 Configuration
 -------------

@@ -21,5 +28,14 @@ configuration file. The available options are:
 - **overwrite**: Calculate a BPM even for files that already have a
   `bpm` value.
   Default: ``no``.
+- **beat_track_kwargs**: Any extra keyword arguments that you would like to
+  provide to librosa's `beat_track`_ function call, for example:
+
+  .. code-block:: yaml
+
+      autobpm:
+          beat_track_kwargs:
+              start_bpm: 160

 .. _Librosa: https://github.com/librosa/librosa/
+.. _beat_track: https://librosa.org/doc/latest/generated/librosa.beat.beat_track.html

@@ -27,6 +27,10 @@ file. The available options are:
 - **format**: Defines the format for the featuring X part of the new title field.
   In this format the ``{0}`` is used to define where the featured artists are placed.
   Default: ``feat. {0}``
+- **keep_in_artist**: Keep the featuring X part in the artist field. This can
+  be useful if you still want to be able to search for features in the artist
+  field.
+  Default: ``no``.
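
A configuration sketch combining both options (values are illustrative):

.. code-block:: yaml

    ftintitle:
        keep_in_artist: yes
        format: feat. {0}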

Running Manually
----------------

@@ -11,13 +11,34 @@ the ``rewrite`` plugin modifies the metadata, this plugin does not.

 Enable the ``substitute`` plugin (see :ref:`using-plugins`), then make a ``substitute:`` section in your config file to contain your rules.
 Each rule consists of a case-insensitive regular expression pattern, and a
-replacement value. For example, you might use:
+replacement string. For example, you might use:

 .. code-block:: yaml

     substitute:
         .*jimi hendrix.*: Jimi Hendrix

+The replacement can be an expression utilising the matched regex, allowing us
+to create more general rules. Say for example, we want to sort all albums by
+multiple artists into the directory of the first artist. We can thus capture
+everything before the first ``,``, `` &`` or `` and``, and use this capture
+group in the output, discarding the rest of the string.
+
+.. code-block:: yaml
+
+    substitute:
+        ^(.*?)(,| &| and).*: \1
+
+This would handle all the below cases in a single rule:
+
+    Bob Dylan and The Band -> Bob Dylan
+    Neil Young & Crazy Horse -> Neil Young
+    James Yorkston, Nina Persson & The Second Hand Orchestra -> James Yorkston
+

 To apply the substitution, you have to call the function ``%substitute{}`` in the paths section. For example:

 .. code-block:: yaml

     paths:
-        default: %substitute{$albumartist}/$year - $album%aunique{}/$track - $title
+        default: \%substitute{$albumartist}/$year - $album\%aunique{}/$track - $title

@@ -36,9 +36,10 @@ fields to nullify and the conditions for nullifying them:
 For example::

     zero:
-        fields: month day genre comments
+        fields: month day genre genres comments
         comments: [EAC, LAME, from.+collection, 'ripped by']
-        genre: [rnb, 'power metal']
+        genres: [rnb, 'power metal']
         update_database: true

 If a custom pattern is not defined for a given field, the field will be nulled

@@ -60,4 +61,4 @@ art from files' tags unless you tell it not to. To keep the album art, include
 the special field ``images`` in the list. For example::

     zero:
-        keep_fields: title artist album year track genre images
+        keep_fields: title artist album year track genre genres images

@@ -276,6 +276,21 @@ Either ``yes`` or ``no``, indicating whether matched albums should have their
 That is, if this option is turned on, then ``year`` will always equal
 ``original_year`` and so on. Default: ``no``.

+.. _overwrite_null:
+
+overwrite_null
+~~~~~~~~~~~~~~
+
+This confusingly-named option indicates which fields have meaningful `null` values. If
+an album or track field is in the corresponding list, then an existing value for this
+field in an item in the database can be overwritten with `null`. By default, however,
+`null` is interpreted as information about the field being unavailable, so it would not
+overwrite existing values. For example::
+
+    overwrite_null:
+        album: ["albumid"]
+        track: ["title", "date"]
+
 .. _artist_credit:

 artist_credit

@@ -600,13 +615,13 @@ Defaults to ``no``.

 This kind of clone is only available on certain filesystems: for example,
 btrfs and APFS. For more details on filesystem support, see the `pyreflink`_
-documentation.  Note that you need to install ``pyreflink``, either through
+documentation. Note that you need to install ``pyreflink``, either through
 ``python -m pip install beets[reflink]`` or ``python -m pip install reflink``.

 The option is ignored if ``move`` is enabled (i.e., beets can move or
 copy files but it doesn't make sense to do both).
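
As a sketch, enabling opportunistic cloning in the ``import`` section might look like this (``auto`` clones when the filesystem supports it and falls back to a plain copy otherwise):

.. code-block:: yaml

    import:
        reflink: auto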

-.. _file clones: https://blogs.oracle.com/otn/save-disk-space-on-linux-by-cloning-files-on-btrfs-and-ocfs2
+.. _file clones: https://en.wikipedia.org/wiki/Copy-on-write
 .. _pyreflink: https://reflink.readthedocs.io/en/latest/

 resume

@@ -60,6 +60,7 @@ These functions are built in to beets:

 * ``%lower{text}``: Convert ``text`` to lowercase.
 * ``%upper{text}``: Convert ``text`` to UPPERCASE.
+* ``%capitalize{text}``: Make the first letter of ``text`` UPPERCASE and the rest lowercase.
 * ``%title{text}``: Convert ``text`` to Title Case.
 * ``%left{text,n}``: Return the first ``n`` characters of ``text``.
 * ``%right{text,n}``: Return the last ``n`` characters of ``text``.
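
A sketch of the new function in a path format; assuming a ``$title`` value of ``BEETS IS MUSIC``, the function would render ``Beets is music``:

.. code-block:: yaml

    paths:
        default: $albumartist/$album/$track %capitalize{$title}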
Some files were not shown because too many files have changed in this diff.